// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
#include "adf_c62xvf_hw_data.h"
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
.instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_ACCELERATORS_MASK;
}
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_MAX_ACCELERATORS;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_MAX_ACCELENGINES;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62XIOV_ETR_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
return DEV_SKU_VF;
}
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
}
static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c62xiov_class;
hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->dev_class->instances++;
hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
adf_devmgr_update_class_index(hw_data);
}
/* Source: linux-master drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_dbgfs.h>
#include "adf_c62xvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C62XVF_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
struct adf_accel_dev *pf;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
adf_clean_hw_data_c62xiov(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
switch (ent->device) {
case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
accel_dev->is_vf = true;
pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table */
if (adf_devmgr_add_dev(accel_dev, pf)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c62xiov(accel_dev->hw_device);
/* Get Accelerator and Accelerator Engine masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Find and map all the device's BARS */
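/*
 * Note: the loop below scans ADF_PCI_MAX_BARS * 2 mask bits and advances the
 * bar index only for set bits, presumably because each 64-bit BAR occupies
 * two BAR slots.
 */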
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
adf_dbgfs_init(accel_dev);
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
adf_flush_vf_wq(accel_dev);
adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
adf_clean_vf_map(true);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
/* Source: linux-master drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/container_of.h>
#include <linux/dev_printk.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_timer.h"
#define ADF_GEN4_TIMER_PERIOD_MS 200
/* This periodic update is used to trigger HB, RL & TL fw events */
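/*
 * Each run reports to the FW how many ADF_GEN4_TIMER_PERIOD_MS (200 ms)
 * periods have elapsed since the timer was started, e.g. time_periods is
 * roughly 5 after one second of uptime.
 */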
static void work_handler(struct work_struct *work)
{
struct adf_accel_dev *accel_dev;
struct adf_timer *timer_ctx;
u32 time_periods;
timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
accel_dev = timer_ctx->accel_dev;
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
ADF_GEN4_TIMER_PERIOD_MS);
if (adf_send_admin_tim_sync(accel_dev, time_periods))
dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
}
int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx;
timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
if (!timer_ctx)
return -ENOMEM;
timer_ctx->accel_dev = accel_dev;
accel_dev->timer = timer_ctx;
timer_ctx->initial_ktime = ktime_get_real();
INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx = accel_dev->timer;
if (!timer_ctx)
return;
cancel_delayed_work_sync(&timer_ctx->work_ctx);
kfree(timer_ctx);
accel_dev->timer = NULL;
}
EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"
static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];
struct vf_id_map {
u32 bdf;
u32 id;
u32 fake_id;
bool attached;
struct list_head list;
};
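/*
 * adf_get_vf_id() below recovers the VF index from the PCI devfn: with eight
 * functions per slot, 7 * (slot - 1) + func + (slot - 1) is equivalent to
 * 8 * (slot - 1) + func.
 */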
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}
static int adf_get_vf_num(struct adf_accel_dev *vf)
{
return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}
static struct vf_id_map *adf_find_vf(u32 bdf)
{
struct list_head *itr;
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->bdf == bdf)
return ptr;
}
return NULL;
}
static int adf_get_vf_real_id(u32 fake)
{
struct list_head *itr;
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->fake_id == fake)
return ptr->id;
}
return -1;
}
/**
* adf_clean_vf_map() - Cleans VF id mappings
*
* Function cleans internal ids for virtual functions.
* @vf: flag indicating whether mappings are cleaned
* for vfs only or for both vfs and pfs
*/
void adf_clean_vf_map(bool vf)
{
struct vf_id_map *map;
struct list_head *ptr, *tmp;
mutex_lock(&table_lock);
list_for_each_safe(ptr, tmp, &vfs_table) {
map = list_entry(ptr, struct vf_id_map, list);
if (map->bdf != -1) {
id_map[map->id] = 0;
num_devices--;
}
if (vf && map->bdf == -1)
continue;
list_del(ptr);
kfree(map);
}
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);
/**
* adf_devmgr_update_class_index() - Update internal index
* @hw_data: Pointer to internal device data.
*
* Function updates internal dev index for VFs
*/
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
struct adf_hw_device_class *class = hw_data->dev_class;
struct list_head *itr;
int i = 0;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr->hw_device->dev_class == class)
ptr->hw_device->instance_id = i++;
if (i == class->instances)
break;
}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
static unsigned int adf_find_free_id(void)
{
unsigned int i;
for (i = 0; i < ADF_MAX_DEVICES; i++) {
if (!id_map[i]) {
id_map[i] = 1;
return i;
}
}
return ADF_MAX_DEVICES + 1;
}
/**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device.
* @pf: Corresponding PF if the accel_dev is a VF
*
* Function adds acceleration device to the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf)
{
struct list_head *itr;
int ret = 0;
if (num_devices == ADF_MAX_DEVICES) {
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
ADF_MAX_DEVICES);
return -EFAULT;
}
mutex_lock(&table_lock);
atomic_set(&accel_dev->ref_count, 0);
/* PF on host or VF on guest - optimized to remove redundant is_vf */
if (!accel_dev->is_vf || !pf) {
struct vf_id_map *map;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr == accel_dev) {
ret = -EEXIST;
goto unlock;
}
}
list_add_tail(&accel_dev->list, &accel_table);
accel_dev->accel_id = adf_find_free_id();
if (accel_dev->accel_id > ADF_MAX_DEVICES) {
ret = -EFAULT;
goto unlock;
}
num_devices++;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto unlock;
}
map->bdf = ~0;
map->id = accel_dev->accel_id;
map->fake_id = map->id;
map->attached = true;
list_add_tail(&map->list, &vfs_table);
} else if (accel_dev->is_vf && pf) {
/* VF on host */
struct vf_id_map *map;
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (map) {
struct vf_id_map *next;
accel_dev->accel_id = map->id;
list_add_tail(&accel_dev->list, &accel_table);
map->fake_id++;
map->attached = true;
next = list_next_entry(map, list);
while (next && &next->list != &vfs_table) {
next->fake_id++;
next = list_next_entry(next, list);
}
ret = 0;
goto unlock;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto unlock;
}
accel_dev->accel_id = adf_find_free_id();
if (accel_dev->accel_id > ADF_MAX_DEVICES) {
kfree(map);
ret = -EFAULT;
goto unlock;
}
num_devices++;
list_add_tail(&accel_dev->list, &accel_table);
map->bdf = adf_get_vf_num(accel_dev);
map->id = accel_dev->accel_id;
map->fake_id = map->id;
map->attached = true;
list_add_tail(&map->list, &vfs_table);
}
mutex_init(&accel_dev->state_lock);
unlock:
mutex_unlock(&table_lock);
return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
struct list_head *adf_devmgr_get_head(void)
{
return &accel_table;
}
/**
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
* @accel_dev: Pointer to acceleration device.
* @pf: Corresponding PF if the accel_dev is a VF
*
* Function removes acceleration device from the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf)
{
mutex_lock(&table_lock);
/* PF on host or VF on guest - optimized to remove redundant is_vf */
if (!accel_dev->is_vf || !pf) {
id_map[accel_dev->accel_id] = 0;
num_devices--;
} else if (accel_dev->is_vf && pf) {
struct vf_id_map *map, *next;
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (!map) {
dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
goto unlock;
}
map->fake_id--;
map->attached = false;
next = list_next_entry(map, list);
while (next && &next->list != &vfs_table) {
next->fake_id--;
next = list_next_entry(next, list);
}
}
unlock:
mutex_destroy(&accel_dev->state_lock);
list_del(&accel_dev->list);
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
struct adf_accel_dev *adf_devmgr_get_first(void)
{
struct adf_accel_dev *dev = NULL;
if (!list_empty(&accel_table))
dev = list_first_entry(&accel_table, struct adf_accel_dev,
list);
return dev;
}
/**
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
* @pci_dev: Pointer to PCI device.
*
* Function returns acceleration device associated with the given PCI device.
* To be used by QAT device specific drivers.
*
* Return: pointer to accel_dev or NULL if not found.
*/
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
struct list_head *itr;
mutex_lock(&table_lock);
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr->accel_pci_dev.pci_dev == pci_dev) {
mutex_unlock(&table_lock);
return ptr;
}
}
mutex_unlock(&table_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
struct list_head *itr;
int real_id;
mutex_lock(&table_lock);
real_id = adf_get_vf_real_id(id);
if (real_id < 0)
goto unlock;
id = real_id;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr->accel_id == id) {
mutex_unlock(&table_lock);
return ptr;
}
}
unlock:
mutex_unlock(&table_lock);
return NULL;
}
int adf_devmgr_verify_id(u32 id)
{
if (id == ADF_CFG_ALL_DEVICES)
return 0;
if (adf_devmgr_get_dev_by_id(id))
return 0;
return -ENODEV;
}
static int adf_get_num_dettached_vfs(void)
{
struct list_head *itr;
int vfs = 0;
mutex_lock(&table_lock);
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->bdf != ~0 && !ptr->attached)
vfs++;
}
mutex_unlock(&table_lock);
return vfs;
}
void adf_devmgr_get_num_dev(u32 *num)
{
*num = num_devices - adf_get_num_dettached_vfs();
}
/**
* adf_dev_in_use() - Check whether accel_dev is currently in use
* @accel_dev: Pointer to acceleration device.
*
* To be used by QAT device specific drivers.
*
* Return: 1 when device is in use, 0 otherwise.
*/
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);
/**
* adf_dev_get() - Increment accel_dev reference count
* @accel_dev: Pointer to acceleration device.
*
* Increment the accel_dev refcount and if this is the first time
* incrementing it during this period the accel_dev is in use,
* increment the module refcount too.
* To be used by QAT device specific drivers.
*
* Return: 0 when successful, -EFAULT when it fails to bump the module refcount
*/
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
if (!try_module_get(accel_dev->owner))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);
/**
* adf_dev_put() - Decrement accel_dev reference count
* @accel_dev: Pointer to acceleration device.
*
* Decrement the accel_dev refcount and if this is the last time
* decrementing it during this period the accel_dev is in use,
* decrement the module refcount too.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);
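/*
 * Illustrative sketch (not part of the upstream file): how a caller is
 * expected to pair adf_dev_get()/adf_dev_put() around use of a device. The
 * helper name qat_example_use_device() is hypothetical.
 */
static inline int qat_example_use_device(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Take a device reference; the first user also pins the owning module */
	ret = adf_dev_get(accel_dev);
	if (ret)
		return ret;

	/* ... submit work to the device here ... */

	/* Drop the reference; the last user releases the module */
	adf_dev_put(accel_dev);
	return 0;
}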
/**
* adf_devmgr_in_reset() - Check whether device is in reset
* @accel_dev: Pointer to acceleration device.
*
* To be used by QAT device specific drivers.
*
* Return: 1 when the device is being reset, 0 otherwise.
*/
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
/**
* adf_dev_started() - Check whether device has started
* @accel_dev: Pointer to acceleration device.
*
* To be used by QAT device specific drivers.
*
* Return: 1 when the device has started, 0 otherwise
*/
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
#include "adf_heartbeat.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
static void adf_service_add(struct service_hndl *service)
{
mutex_lock(&service_lock);
list_add(&service->list, &service_table);
mutex_unlock(&service_lock);
}
int adf_service_register(struct service_hndl *service)
{
memset(service->init_status, 0, sizeof(service->init_status));
memset(service->start_status, 0, sizeof(service->start_status));
adf_service_add(service);
return 0;
}
static void adf_service_remove(struct service_hndl *service)
{
mutex_lock(&service_lock);
list_del(&service->list);
mutex_unlock(&service_lock);
}
int adf_service_unregister(struct service_hndl *service)
{
int i;
for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
if (service->init_status[i] || service->start_status[i]) {
pr_err("QAT: Could not remove active service\n");
return -EFAULT;
}
}
adf_service_remove(service);
return 0;
}
/**
* adf_dev_init() - Init data structures and services for the given accel device
* @accel_dev: Pointer to acceleration device.
*
* Initialize the ring data structures and the admin comms and arbitration
* services.
*
* Return: 0 on success, error code otherwise.
*/
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int ret;
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
"Failed to init device - hw_data not set\n");
return -EFAULT;
}
if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
!accel_dev->is_vf) {
dev_err(&GET_DEV(accel_dev), "Device not configured\n");
return -EFAULT;
}
if (adf_init_etr_data(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
return -EFAULT;
}
if (hw_data->init_device && hw_data->init_device(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
return -EFAULT;
}
if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
return -EFAULT;
}
if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
return -EFAULT;
}
if (adf_ae_init(accel_dev)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise Acceleration Engine\n");
return -EFAULT;
}
set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
if (adf_ae_fw_load(accel_dev)) {
dev_err(&GET_DEV(accel_dev),
"Failed to load acceleration FW\n");
return -EFAULT;
}
set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
if (hw_data->alloc_irq(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
return -EFAULT;
}
set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
hw_data->enable_ints(accel_dev);
hw_data->enable_error_correction(accel_dev);
ret = hw_data->pfvf_ops.enable_comms(accel_dev);
if (ret)
return ret;
if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
accel_dev->is_vf) {
if (qat_crypto_vf_dev_config(accel_dev))
return -EFAULT;
}
adf_heartbeat_init(accel_dev);
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
* prior to starting any of the accelerators (a minimal service handler
* is sketched after this function).
*/
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise service %s\n",
service->name);
return -EFAULT;
}
set_bit(accel_dev->accel_id, service->init_status);
}
return 0;
}
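/*
 * Illustrative sketch (not part of the upstream file): a minimal subservice
 * showing the two-stage init/start model driven by adf_dev_init() and
 * adf_dev_start(). The handler below is hypothetical; it would be hooked up
 * with adf_service_register(&qat_example_service) from its owning module.
 */
static int qat_example_service_event(struct adf_accel_dev *accel_dev,
				     enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_INIT:
		/* allocate per-device state; the device is not live yet */
		return 0;
	case ADF_EVENT_START:
		/* the device is about to start processing requests */
		return 0;
	case ADF_EVENT_STOP:
	case ADF_EVENT_SHUTDOWN:
		/* undo what was done in the corresponding stage */
		return 0;
	default:
		return 0;
	}
}

static struct service_hndl qat_example_service = {
	.name = "qat_example_service",
	.event_hld = qat_example_service_event,
};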
/**
* adf_dev_start() - Start acceleration service for the given accel device
* @accel_dev: Pointer to acceleration device.
*
* Function notifies all the registered services that the acceleration device
* is ready to be used.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
if (adf_ae_start(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
return -EFAULT;
}
set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
if (hw_data->send_admin_init(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
return -EFAULT;
}
if (hw_data->measure_clock) {
ret = hw_data->measure_clock(accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
return ret;
}
}
/* Set ssm watch dog timer */
if (hw_data->set_ssm_wdtimer)
hw_data->set_ssm_wdtimer(accel_dev);
/* Enable Power Management */
if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
return -EFAULT;
}
if (hw_data->start_timer) {
ret = hw_data->start_timer(accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
return ret;
}
}
adf_heartbeat_start(accel_dev);
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
"Failed to start service %s\n",
service->name);
return -EFAULT;
}
set_bit(accel_dev->accel_id, service->start_status);
}
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (!list_empty(&accel_dev->crypto_list) &&
(qat_algs_register() || qat_asym_algs_register())) {
dev_err(&GET_DEV(accel_dev),
"Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return -EFAULT;
}
if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
dev_err(&GET_DEV(accel_dev),
"Failed to register compression algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return -EFAULT;
}
adf_dbgfs_add(accel_dev);
return 0;
}
/**
* adf_dev_stop() - Stop acceleration service for the given accel device
* @accel_dev: Pointer to acceleration device.
*
* Function notifies all the registered services that the acceleration device
* is shutting down.
* To be used by QAT device specific drivers.
*
* Return: void
*/
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
bool wait = false;
int ret;
if (!adf_dev_started(accel_dev) &&
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
adf_dbgfs_rm(accel_dev);
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (!list_empty(&accel_dev->crypto_list)) {
qat_algs_unregister();
qat_asym_algs_unregister();
}
if (!list_empty(&accel_dev->compression_list))
qat_comp_algs_unregister();
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (!test_bit(accel_dev->accel_id, service->start_status))
continue;
ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
if (!ret) {
clear_bit(accel_dev->accel_id, service->start_status);
} else if (ret == -EAGAIN) {
wait = true;
clear_bit(accel_dev->accel_id, service->start_status);
}
}
if (hw_data->stop_timer)
hw_data->stop_timer(accel_dev);
if (wait)
msleep(100);
if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
if (adf_ae_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
}
/**
* adf_dev_shutdown() - shutdown acceleration services and data structures
* @accel_dev: Pointer to acceleration device
*
* Cleanup the ring data structures and the admin comms and arbitration
* services.
*/
static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
"QAT: Failed to shutdown device - hw_data not set\n");
return;
}
if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
adf_ae_fw_release(accel_dev);
clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
}
if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
if (adf_ae_shutdown(accel_dev))
dev_err(&GET_DEV(accel_dev),
"Failed to shutdown Accel Engine\n");
else
clear_bit(ADF_STATUS_AE_INITIALISED,
&accel_dev->status);
}
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (!test_bit(accel_dev->accel_id, service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
dev_err(&GET_DEV(accel_dev),
"Failed to shutdown service %s\n",
service->name);
else
clear_bit(accel_dev->accel_id, service->init_status);
}
adf_heartbeat_shutdown(accel_dev);
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
}
/* Delete configuration only if not restarting */
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
adf_cfg_del_all(accel_dev);
if (hw_data->exit_arb)
hw_data->exit_arb(accel_dev);
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
service->name);
}
return 0;
}
int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
service->name);
}
return 0;
}
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
if (!ret) {
ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
if (ret)
return ret;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED,
services, ADF_STR);
if (ret)
return ret;
}
return 0;
}
int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
{
int ret = 0;
if (!accel_dev)
return -EINVAL;
mutex_lock(&accel_dev->state_lock);
if (!adf_dev_started(accel_dev)) {
dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
accel_dev->accel_id);
ret = -EINVAL;
goto out;
}
if (reconfig) {
ret = adf_dev_shutdown_cache_cfg(accel_dev);
goto out;
}
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
out:
mutex_unlock(&accel_dev->state_lock);
return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_down);
int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
int ret = 0;
if (!accel_dev)
return -EINVAL;
mutex_lock(&accel_dev->state_lock);
if (adf_dev_started(accel_dev)) {
dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
accel_dev->accel_id);
ret = -EALREADY;
goto out;
}
if (config && GET_HW_DATA(accel_dev)->dev_config) {
ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
if (unlikely(ret))
goto out;
}
ret = adf_dev_init(accel_dev);
if (unlikely(ret))
goto out;
ret = adf_dev_start(accel_dev);
out:
mutex_unlock(&accel_dev->state_lock);
return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);
int adf_dev_restart(struct adf_accel_dev *accel_dev)
{
int ret = 0;
if (!accel_dev)
return -EFAULT;
adf_dev_down(accel_dev, false);
ret = adf_dev_up(accel_dev, false);
/* if the device is already up, return success */
if (ret == -EALREADY)
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_restart);
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_init.c */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
static const char * const state_operations[] = {
[DEV_DOWN] = "down",
[DEV_UP] = "up",
};
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adf_accel_dev *accel_dev;
char *state;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
state = adf_dev_started(accel_dev) ? "up" : "down";
return sysfs_emit(buf, "%s\n", state);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adf_accel_dev *accel_dev;
u32 accel_id;
int ret;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
accel_id = accel_dev->accel_id;
if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
return -EBUSY;
}
ret = sysfs_match_string(state_operations, buf);
if (ret < 0)
return ret;
switch (ret) {
case DEV_DOWN:
dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
ret = adf_dev_down(accel_dev, true);
if (ret < 0)
return -EINVAL;
break;
case DEV_UP:
dev_info(dev, "Starting device qat_dev%d\n", accel_id);
ret = adf_dev_up(accel_dev, true);
if (ret < 0) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
adf_dev_down(accel_dev, true);
return ret;
}
break;
default:
return -EINVAL;
}
return count;
}
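/*
 * Usage note: the attributes in this file are exposed under the device's
 * "qat" sysfs group, so the handlers above are driven by writing "up" or
 * "down" to /sys/bus/pci/devices/<bdf>/qat/state.
 */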
static const char * const services_operations[] = {
ADF_CFG_CY,
ADF_CFG_DC,
ADF_CFG_SYM,
ADF_CFG_ASYM,
ADF_CFG_ASYM_SYM,
ADF_CFG_ASYM_DC,
ADF_CFG_DC_ASYM,
ADF_CFG_SYM_DC,
ADF_CFG_DC_SYM,
};
static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
struct adf_accel_dev *accel_dev;
int ret;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
if (ret)
return ret;
return sysfs_emit(buf, "%s\n", services);
}
static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
const char *services)
{
return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services,
ADF_STR);
}
static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adf_hw_device_data *hw_data;
struct adf_accel_dev *accel_dev;
int ret;
ret = sysfs_match_string(services_operations, buf);
if (ret < 0)
return ret;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
if (adf_dev_started(accel_dev)) {
dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
accel_dev->accel_id);
return -EINVAL;
}
ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
if (ret < 0)
return ret;
hw_data = GET_HW_DATA(accel_dev);
/* Update capabilities mask after change in configuration.
* A call to this function is required as capabilities are, at the
* moment, tied to configuration
*/
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
if (!hw_data->accel_capabilities_mask)
return -EINVAL;
return count;
}
static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
struct adf_accel_dev *accel_dev;
int ret;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
if (ret)
return sysfs_emit(buf, "1\n");
return sysfs_emit(buf, "%s\n", pm_idle_enabled);
}
static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long pm_idle_enabled_cfg_val;
struct adf_accel_dev *accel_dev;
bool pm_idle_enabled;
int ret;
ret = kstrtobool(buf, &pm_idle_enabled);
if (ret)
return ret;
pm_idle_enabled_cfg_val = pm_idle_enabled;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
if (adf_dev_started(accel_dev)) {
dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
accel_dev->accel_id);
return -EINVAL;
}
ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
ADF_DEC);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(pm_idle_enabled);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
static struct attribute *qat_attrs[] = {
&dev_attr_state.attr,
&dev_attr_cfg_services.attr,
&dev_attr_pm_idle_enabled.attr,
NULL,
};
static struct attribute_group qat_group = {
.attrs = qat_attrs,
.name = "qat",
};
int adf_sysfs_init(struct adf_accel_dev *accel_dev)
{
int ret;
ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to create qat attribute group: %d\n", ret);
}
return ret;
}
EXPORT_SYMBOL_GPL(adf_sysfs_init);
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_sysfs.c */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
static struct workqueue_struct *adf_misc_wq;
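/*
 * MSI-X layout used below: vectors 0..num_banks-1 serve the ring bundles and
 * vector num_banks is the AE cluster/admin interrupt. When SR-IOV is enabled
 * only the AE cluster vector is requested.
 */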
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 msix_num_entries = hw_data->num_banks + 1;
int ret;
if (hw_data->set_msix_rttable)
hw_data->set_msix_rttable(accel_dev);
ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
msix_num_entries, PCI_IRQ_MSIX);
if (unlikely(ret < 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to allocate %d MSI-X vectors\n",
msix_num_entries);
return ret;
}
return 0;
}
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
pci_free_irq_vectors(pci_dev_info->pci_dev);
}
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
struct adf_etr_bank_data *bank = bank_ptr;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
0);
tasklet_hi_schedule(&bank->resp_handler);
return IRQ_HANDLED;
}
#ifdef CONFIG_PCI_IOV
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 pending;
spin_lock(&accel_dev->pf.vf2pf_ints_lock);
pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
return pending;
}
static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
bool irq_handled = false;
unsigned long vf_mask;
/* Get the interrupt sources triggered by VFs, except for those already disabled */
vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
if (vf_mask) {
struct adf_accel_vf_info *vf_info;
int i;
/*
* Handle VF2PF interrupt unless the VF is malicious and
* is attempting to flood the host OS with VF2PF interrupts.
*/
for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
vf_info = accel_dev->pf.vf_info + i;
if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
dev_info(&GET_DEV(accel_dev),
"Too many ints from VF%d\n",
vf_info->vf_nr);
continue;
}
adf_schedule_vf2pf_handler(vf_info);
irq_handled = true;
}
}
return irq_handled;
}
#endif /* CONFIG_PCI_IOV */
static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
if (hw_data->handle_pm_interrupt &&
hw_data->handle_pm_interrupt(accel_dev))
return true;
return false;
}
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
#ifdef CONFIG_PCI_IOV
/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */
if (adf_handle_pm_int(accel_dev))
return IRQ_HANDLED;
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
accel_dev->accel_id);
return IRQ_NONE;
}
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
struct adf_etr_data *etr_data = accel_dev->transport;
int clust_irq = hw_data->num_banks;
int irq, i = 0;
if (pci_dev_info->msix_entries.num_entries > 1) {
for (i = 0; i < hw_data->num_banks; i++) {
if (irqs[i].enabled) {
irq = pci_irq_vector(pci_dev_info->pci_dev, i);
irq_set_affinity_hint(irq, NULL);
free_irq(irq, &etr_data->banks[i]);
}
}
}
if (irqs[i].enabled) {
irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
free_irq(irq, accel_dev);
}
}
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
struct adf_etr_data *etr_data = accel_dev->transport;
int clust_irq = hw_data->num_banks;
int ret, irq, i = 0;
char *name;
/* Request msix irq for all banks unless SR-IOV enabled */
if (!accel_dev->pf.vf_info) {
for (i = 0; i < hw_data->num_banks; i++) {
struct adf_etr_bank_data *bank = &etr_data->banks[i];
unsigned int cpu, cpus = num_online_cpus();
name = irqs[i].name;
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
"qat%d-bundle%d", accel_dev->accel_id, i);
irq = pci_irq_vector(pci_dev_info->pci_dev, i);
if (unlikely(irq < 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to get IRQ number of device vector %d - %s\n",
i, name);
ret = irq;
goto err;
}
ret = request_irq(irq, adf_msix_isr_bundle, 0,
&name[0], bank);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to allocate IRQ %d for %s\n",
irq, name);
goto err;
}
cpu = ((accel_dev->accel_id * hw_data->num_banks) +
i) % cpus;
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
irqs[i].enabled = true;
}
}
/* Request msix irq for AE */
name = irqs[i].name;
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
"qat%d-ae-cluster", accel_dev->accel_id);
irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
if (unlikely(irq < 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to get IRQ number of device vector %d - %s\n",
i, name);
ret = irq;
goto err;
}
ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to allocate IRQ %d for %s\n", irq, name);
goto err;
}
irqs[i].enabled = true;
return ret;
err:
adf_free_irqs(accel_dev);
return ret;
}
static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 msix_num_entries = 1;
struct adf_irq *irqs;
/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!irqs)
return -ENOMEM;
accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
return 0;
}
static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *priv_data = accel_dev->transport;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int i;
for (i = 0; i < hw_data->num_banks; i++)
tasklet_init(&priv_data->banks[i].resp_handler,
adf_response_handler,
(unsigned long)&priv_data->banks[i]);
return 0;
}
static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *priv_data = accel_dev->transport;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int i;
for (i = 0; i < hw_data->num_banks; i++) {
tasklet_disable(&priv_data->banks[i].resp_handler);
tasklet_kill(&priv_data->banks[i].resp_handler);
}
}
/**
* adf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device.
*/
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
adf_free_irqs(accel_dev);
adf_cleanup_bh(accel_dev);
adf_disable_msix(&accel_dev->accel_pci_dev);
adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
/**
* adf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device.
*
* Return: 0 on success, error code otherwise.
*/
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
int ret;
ret = adf_isr_alloc_msix_vectors_data(accel_dev);
if (ret)
goto err_out;
ret = adf_enable_msix(accel_dev);
if (ret)
goto err_free_msix_table;
ret = adf_setup_bh(accel_dev);
if (ret)
goto err_disable_msix;
ret = adf_request_irqs(accel_dev);
if (ret)
goto err_cleanup_bh;
return 0;
err_cleanup_bh:
adf_cleanup_bh(accel_dev);
err_disable_msix:
adf_disable_msix(&accel_dev->accel_pci_dev);
err_free_msix_table:
adf_isr_free_msix_vectors_data(accel_dev);
err_out:
return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
/**
* adf_init_misc_wq() - Init misc workqueue
*
* Function initializes the workqueue 'qat_misc_wq' for general purpose use.
*
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_misc_wq(void)
{
adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
return !adf_misc_wq ? -ENOMEM : 0;
}
void adf_exit_misc_wq(void)
{
if (adf_misc_wq)
destroy_workqueue(adf_misc_wq);
adf_misc_wq = NULL;
}
bool adf_misc_wq_queue_work(struct work_struct *work)
{
return queue_work(adf_misc_wq, work);
}
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
unsigned long delay)
{
return queue_delayed_work(adf_misc_wq, work, delay);
}
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_isr.c */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/debugfs.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
#include "adf_fw_counters.h"
#include "adf_heartbeat_dbgfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
* @accel_dev: Pointer to acceleration device.
*
* This function creates debugfs entries that are persistent through a device
* state change (from up to down or vice versa).
*/
void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
{
char name[ADF_DEVICE_NAME_LENGTH];
void *ret;
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
accel_dev->hw_device->dev_class->name,
pci_name(accel_dev->accel_pci_dev.pci_dev));
ret = debugfs_create_dir(name, NULL);
if (IS_ERR_OR_NULL(ret))
return;
accel_dev->debugfs_dir = ret;
adf_cfg_dev_dbgfs_add(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dbgfs_init);
/**
* adf_dbgfs_exit() - remove persistent debugfs entries
* @accel_dev: Pointer to acceleration device.
*/
void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
{
adf_cfg_dev_dbgfs_rm(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
}
EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
/**
* adf_dbgfs_add() - add non-persistent debugfs entries
* @accel_dev: Pointer to acceleration device.
*
* This function creates debugfs entries that are not persistent through
* a device state change (from up to down or vice versa).
*/
void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
{
if (!accel_dev->debugfs_dir)
return;
if (!accel_dev->is_vf) {
adf_fw_counters_dbgfs_add(accel_dev);
adf_heartbeat_dbgfs_add(accel_dev);
}
}
/**
* adf_dbgfs_rm() - remove non-persistent debugfs entries
* @accel_dev: Pointer to acceleration device.
*/
void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
if (!accel_dev->debugfs_dir)
return;
if (!accel_dev->is_vf) {
adf_heartbeat_dbgfs_rm(accel_dev);
adf_fw_counters_dbgfs_rm(accel_dev);
}
}
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_dbgfs.c */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
#include "adf_cfg_strings.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_gen4_hw_data.h"
#include "adf_cfg.h"
enum qat_pm_host_msg {
PM_NO_CHANGE = 0,
PM_SET_MIN,
};
struct adf_gen4_pm_data {
struct work_struct pm_irq_work;
struct adf_accel_dev *accel_dev;
u32 pm_int_sts;
};
static int send_host_msg(struct adf_accel_dev *accel_dev)
{
char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
bool pm_idle_support;
u32 msg;
int ret;
msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
if (msg & ADF_GEN4_PM_MSG_PENDING)
return -EBUSY;
adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_PM_IDLE_SUPPORT, pm_idle_support_cfg);
ret = kstrtobool(pm_idle_support_cfg, &pm_idle_support);
if (ret)
pm_idle_support = true;
/* Send HOST_MSG */
msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK,
pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE);
msg |= ADF_GEN4_PM_MSG_PENDING;
ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
/* Poll status register to make sure the HOST_MSG has been processed */
return read_poll_timeout(ADF_CSR_RD, msg,
!(msg & ADF_GEN4_PM_MSG_PENDING),
ADF_GEN4_PM_MSG_POLL_DELAY_US,
ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
ADF_GEN4_PM_HOST_MSG);
}
static void pm_bh_handler(struct work_struct *work)
{
struct adf_gen4_pm_data *pm_data =
container_of(work, struct adf_gen4_pm_data, pm_irq_work);
struct adf_accel_dev *accel_dev = pm_data->accel_dev;
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
u32 pm_int_sts = pm_data->pm_int_sts;
u32 val;
/* PM Idle interrupt */
if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
/* Issue host message to FW */
if (send_host_msg(accel_dev))
dev_warn_ratelimited(&GET_DEV(accel_dev),
"Failed to send host msg to FW\n");
}
/* Clear interrupt status */
ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
/* Reenable PM interrupt */
val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
val &= ~ADF_GEN4_PM_SOU;
ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
kfree(pm_data);
}
bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
struct adf_gen4_pm_data *pm_data = NULL;
u32 errsou2;
u32 errmsk2;
u32 val;
/* Only handle the interrupt triggered by PM */
errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
if (errmsk2 & ADF_GEN4_PM_SOU)
return false;
errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
if (!(errsou2 & ADF_GEN4_PM_SOU))
return false;
/* Disable interrupt */
val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
val |= ADF_GEN4_PM_SOU;
ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
if (!pm_data)
return false;
pm_data->pm_int_sts = val;
pm_data->accel_dev = accel_dev;
INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
adf_misc_wq_queue_work(&pm_data->pm_irq_work);
return true;
}
EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
int ret;
u32 val;
ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
if (ret)
return ret;
/* Enable default PM interrupts: IDLE, THROTTLE */
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
val |= ADF_GEN4_PM_INT_EN_DEFAULT;
/* Clear interrupt status */
val |= ADF_GEN4_PM_INT_STS_MASK;
ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
/* Unmask PM Interrupt */
val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
val &= ~ADF_GEN4_PM_SOU;
ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
/* Source: linux-master drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/errno.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_clock.h"
#include "adf_common_drv.h"
#include "adf_heartbeat.h"
#include "adf_transport_internal.h"
#include "icp_qat_fw_init_admin.h"
#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
/* Heartbeat counter pair */
struct hb_cnt_pair {
__u16 resp_heartbeat_cnt;
__u16 req_heartbeat_cnt;
};
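/*
 * Layout of the heartbeat DMA area as used below: dev_ctrs live counter
 * pairs written by the FW, a host-kept copy of the previous snapshot, then
 * per-thread u16 failure counters, where dev_ctrs == num_engines * num_hb_ctrs.
 */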
static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
{
u64 curr_time = adf_clock_get_current_time();
u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time;
if (polling_time < accel_dev->heartbeat->hb_timer) {
dev_warn(&GET_DEV(accel_dev),
"HB polling too frequent. Configured HB timer %d ms\n",
accel_dev->heartbeat->hb_timer);
return -EINVAL;
}
accel_dev->heartbeat->last_hb_check_time = curr_time;
return 0;
}
/**
* validate_hb_ctrs_cnt() - checks if the number of heartbeat counters should
* be updated by one to support the currently loaded firmware.
* @accel_dev: Pointer to acceleration device.
*
* Return:
* * true - hb_ctrs must be increased by ADF_NUM_PKE_STRAND
* * false - no changes needed
*/
static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev)
{
const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
const size_t max_aes = accel_dev->hw_device->num_engines;
const size_t hb_struct_size = sizeof(struct hb_cnt_pair);
const size_t exp_diff_size = array3_size(ADF_NUM_PKE_STRAND, max_aes,
hb_struct_size);
const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
const size_t stats_size = size_mul(dev_ctrs, hb_struct_size);
const u32 exp_diff_cnt = exp_diff_size / sizeof(u32);
const u32 stats_el_cnt = stats_size / sizeof(u32);
struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
const u32 *mem_to_chk = (u32 *)(hb_stats + dev_ctrs);
u32 el_diff_cnt = 0;
int i;
/* count how many u32 elements differ from the pattern */
for (i = 0; i < stats_el_cnt; i++) {
if (mem_to_chk[i] == ADF_HB_EMPTY_SIG)
break;
el_diff_cnt++;
}
return el_diff_cnt && el_diff_cnt == exp_diff_cnt;
}
void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
{
struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
const size_t max_aes = accel_dev->hw_device->num_engines;
const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair));
const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32);
/* fill hb stats memory with pattern */
memset32((uint32_t *)hb_stats, ADF_HB_EMPTY_SIG, mem_items_to_fill);
accel_dev->heartbeat->ctrs_cnt_checked = false;
}
EXPORT_SYMBOL_GPL(adf_heartbeat_check_ctrs);
static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value)
{
char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
u32 timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
int cfg_read_status;
u32 ticks;
int ret;
cfg_read_status = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_HEARTBEAT_TIMER, timer_str);
if (cfg_read_status == 0) {
if (kstrtouint(timer_str, 10, &timer_ms))
dev_dbg(&GET_DEV(accel_dev),
"kstrtouint failed to parse the %s, param value",
ADF_HEARTBEAT_TIMER);
}
if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
dev_err(&GET_DEV(accel_dev), "Timer cannot be less than %u\n",
ADF_CFG_HB_TIMER_MIN_MS);
return -EINVAL;
}
/*
* On 4xxx devices adf_timer is responsible for HB updates and
* its period is fixed to 200ms
*/
if (accel_dev->timer)
timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
if (ret)
return ret;
adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
accel_dev->heartbeat->hb_timer = timer_ms;
*value = ticks;
return 0;
}
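/*
 * A thread is counted as failing when its response counter has not moved
 * since the previous poll while it is the admin thread or still has
 * outstanding requests; ADF_CFG_HB_COUNT_THRESHOLD consecutive failures
 * mark the AE as unresponsive.
 */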
static int check_ae(struct hb_cnt_pair *curr, struct hb_cnt_pair *prev,
u16 *count, const size_t hb_ctrs)
{
size_t thr;
/* loop through all threads in AE */
for (thr = 0; thr < hb_ctrs; thr++) {
u16 req = curr[thr].req_heartbeat_cnt;
u16 resp = curr[thr].resp_heartbeat_cnt;
u16 last = prev[thr].resp_heartbeat_cnt;
if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) {
u16 retry = ++count[thr];
if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
return -EIO;
} else {
count[thr] = 0;
}
}
return 0;
}
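/*
 * The heartbeat DMA area holds three back-to-back regions: the live
 * counters updated by firmware, the snapshot taken on the previous poll
 * and the per-thread failure counters.
 */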
static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct hb_cnt_pair *live_stats, *last_stats, *curr_stats;
const size_t hb_ctrs = hw_device->num_hb_ctrs;
const unsigned long ae_mask = hw_device->ae_mask;
const size_t max_aes = hw_device->num_engines;
const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats));
struct hb_cnt_pair *ae_curr_p, *ae_prev_p;
u16 *count_fails, *ae_count_p;
size_t ae_offset;
size_t ae = 0;
int ret = 0;
if (!accel_dev->heartbeat->ctrs_cnt_checked) {
if (validate_hb_ctrs_cnt(accel_dev))
hw_device->num_hb_ctrs += ADF_NUM_PKE_STRAND;
accel_dev->heartbeat->ctrs_cnt_checked = true;
}
live_stats = accel_dev->heartbeat->dma.virt_addr;
last_stats = live_stats + dev_ctrs;
count_fails = (u16 *)(last_stats + dev_ctrs);
curr_stats = kmemdup(live_stats, stats_size, GFP_KERNEL);
if (!curr_stats)
return -ENOMEM;
/* loop through active AEs */
for_each_set_bit(ae, &ae_mask, max_aes) {
ae_offset = size_mul(ae, hb_ctrs);
ae_curr_p = curr_stats + ae_offset;
ae_prev_p = last_stats + ae_offset;
ae_count_p = count_fails + ae_offset;
ret = check_ae(ae_curr_p, ae_prev_p, ae_count_p, hb_ctrs);
if (ret)
break;
}
/* Copy current stats for the next iteration */
memcpy(last_stats, curr_stats, stats_size);
kfree(curr_stats);
return ret;
}
void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status)
{
struct adf_heartbeat *hb;
if (!adf_dev_started(accel_dev) ||
test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
*hb_status = HB_DEV_UNRESPONSIVE;
return;
}
if (adf_hb_check_polling_freq(accel_dev) == -EINVAL) {
*hb_status = HB_DEV_UNSUPPORTED;
return;
}
hb = accel_dev->heartbeat;
hb->hb_sent_counter++;
if (adf_hb_get_status(accel_dev)) {
dev_err(&GET_DEV(accel_dev),
"Heartbeat ERROR: QAT is not responding.\n");
*hb_status = HB_DEV_UNRESPONSIVE;
hb->hb_failed_counter++;
return;
}
*hb_status = HB_DEV_ALIVE;
}
int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
u32 *value)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 clk_per_sec;
/* HB clock may be different than AE clock */
if (!hw_data->get_hb_clock)
return -EINVAL;
clk_per_sec = hw_data->get_hb_clock(hw_data);
*value = time_ms * (clk_per_sec / MSEC_PER_SEC);
return 0;
}
int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
unsigned int timer_ms)
{
char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
snprintf(timer_str, sizeof(timer_str), "%u", timer_ms);
return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
ADF_HEARTBEAT_TIMER, timer_str,
ADF_STR);
}
EXPORT_SYMBOL_GPL(adf_heartbeat_save_cfg_param);
int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb;
hb = kzalloc(sizeof(*hb), GFP_KERNEL);
if (!hb)
goto err_ret;
hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
&hb->dma.phy_addr, GFP_KERNEL);
if (!hb->dma.virt_addr)
goto err_free;
/*
* Set this flag to true by default to avoid unnecessary checks;
* it will be cleared on platforms that need such a check
*/
hb->ctrs_cnt_checked = true;
accel_dev->heartbeat = hb;
return 0;
err_free:
kfree(hb);
err_ret:
return -ENOMEM;
}
int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
{
unsigned int timer_ticks;
int ret;
if (!accel_dev->heartbeat) {
dev_warn(&GET_DEV(accel_dev), "Heartbeat instance not found!");
return -EFAULT;
}
if (accel_dev->hw_device->check_hb_ctrs)
accel_dev->hw_device->check_hb_ctrs(accel_dev);
ret = get_timer_ticks(accel_dev, &timer_ticks);
if (ret)
return ret;
ret = adf_send_admin_hb_timer(accel_dev, timer_ticks);
if (ret)
dev_warn(&GET_DEV(accel_dev), "Heartbeat not supported!");
return ret;
}
void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb = accel_dev->heartbeat;
if (!hb)
return;
if (hb->dma.virt_addr)
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
hb->dma.virt_addr, hb->dma.phy_addr);
kfree(hb);
accel_dev->heartbeat = NULL;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_heartbeat.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
u32 fw_size)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct icp_qat_fw_loader_handle *loader;
const char *obj_name;
u32 num_objs;
u32 ae_mask;
int i;
loader = loader_data->fw_loader;
num_objs = hw_device->uof_get_num_objs();
for (i = 0; i < num_objs; i++) {
obj_name = hw_device->uof_get_name(accel_dev, i);
ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
if (!obj_name || !ae_mask) {
dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
goto out_err;
}
if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
dev_err(&GET_DEV(accel_dev),
"Invalid mask for UOF image\n");
goto out_err;
}
if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
dev_err(&GET_DEV(accel_dev),
"Failed to map UOF firmware\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader)) {
dev_err(&GET_DEV(accel_dev),
"Failed to load UOF firmware\n");
goto out_err;
}
qat_uclo_del_obj(loader);
}
return 0;
out_err:
adf_ae_fw_release(accel_dev);
return -EFAULT;
}
int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
void *fw_addr, *mmp_addr;
u32 fw_size, mmp_size;
if (!hw_device->fw_name)
return 0;
if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
&accel_dev->accel_pci_dev.pci_dev->dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
hw_device->fw_mmp_name);
return -EFAULT;
}
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
&accel_dev->accel_pci_dev.pci_dev->dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
hw_device->fw_name);
goto out_err;
}
fw_size = loader_data->uof_fw->size;
fw_addr = (void *)loader_data->uof_fw->data;
mmp_size = loader_data->mmp_fw->size;
mmp_addr = (void *)loader_data->mmp_fw->data;
if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
goto out_err;
}
if (hw_device->uof_get_num_objs)
return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);
if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
goto out_err;
}
return 0;
out_err:
adf_ae_fw_release(accel_dev);
return -EFAULT;
}
void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
if (!hw_device->fw_name)
return;
qat_uclo_del_obj(loader_data->fw_loader);
qat_hal_deinit(loader_data->fw_loader);
release_firmware(loader_data->uof_fw);
release_firmware(loader_data->mmp_fw);
loader_data->uof_fw = NULL;
loader_data->mmp_fw = NULL;
loader_data->fw_loader = NULL;
}
int adf_ae_start(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 ae_ctr;
if (!hw_data->fw_name)
return 0;
ae_ctr = qat_hal_start(loader_data->fw_loader);
dev_info(&GET_DEV(accel_dev),
"qat_dev%d started %d acceleration engines\n",
accel_dev->accel_id, ae_ctr);
return 0;
}
int adf_ae_stop(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
if (!hw_data->fw_name)
return 0;
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
ae_ctr++;
}
}
dev_info(&GET_DEV(accel_dev),
"qat_dev%d stopped %d acceleration engines\n",
accel_dev->accel_id, ae_ctr);
return 0;
}
static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
qat_hal_reset(loader_data->fw_loader);
if (qat_hal_clr_reset(loader_data->fw_loader))
return -EFAULT;
return 0;
}
int adf_ae_init(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
if (!hw_device->fw_name)
return 0;
loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
if (!loader_data)
return -ENOMEM;
accel_dev->fw_loader = loader_data;
if (qat_hal_init(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
kfree(loader_data);
return -EFAULT;
}
if (adf_ae_reset(accel_dev, 0)) {
dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
qat_hal_deinit(loader_data->fw_loader);
kfree(loader_data);
return -EFAULT;
}
return 0;
}
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
if (!hw_device->fw_name)
return 0;
qat_hal_deinit(loader_data->fw_loader);
kfree(accel_dev->fw_loader);
accel_dev->fw_loader = NULL;
return 0;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_accel_engine.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
static DEFINE_MUTEX(qat_cfg_read_lock);
static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_cfg_device_data *dev_cfg = sfile->private;
mutex_lock(&qat_cfg_read_lock);
return seq_list_start(&dev_cfg->sec_list, *pos);
}
static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
{
struct list_head *list;
struct adf_cfg_section *sec =
list_entry(v, struct adf_cfg_section, list);
seq_printf(sfile, "[%s]\n", sec->name);
list_for_each(list, &sec->param_head) {
struct adf_cfg_key_val *ptr =
list_entry(list, struct adf_cfg_key_val, list);
seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
}
return 0;
}
static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_cfg_device_data *dev_cfg = sfile->private;
return seq_list_next(v, &dev_cfg->sec_list, pos);
}
static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
{
mutex_unlock(&qat_cfg_read_lock);
}
static const struct seq_operations qat_dev_cfg_sops = {
.start = qat_dev_cfg_start,
.next = qat_dev_cfg_next,
.stop = qat_dev_cfg_stop,
.show = qat_dev_cfg_show
};
DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
/**
* adf_cfg_dev_add() - Create an acceleration device configuration table.
* @accel_dev: Pointer to acceleration device.
*
* Function creates a configuration table for the given acceleration device.
* The table stores device specific config values.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data;
dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
if (!dev_cfg_data)
return -ENOMEM;
INIT_LIST_HEAD(&dev_cfg_data->sec_list);
init_rwsem(&dev_cfg_data->lock);
accel_dev->cfg = dev_cfg_data;
return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
dev_cfg_data->debug = debugfs_create_file("dev_cfg", 0400,
accel_dev->debugfs_dir,
dev_cfg_data,
&qat_dev_cfg_fops);
}
void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
if (!dev_cfg_data)
return;
debugfs_remove(dev_cfg_data->debug);
dev_cfg_data->debug = NULL;
}
static void adf_cfg_section_del_all(struct list_head *head);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
}
/**
* adf_cfg_dev_remove() - Clears acceleration device configuration table.
* @accel_dev: Pointer to acceleration device.
*
* Function removes configuration table from the given acceleration device
* and frees all allocated memory.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
if (!dev_cfg_data)
return;
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
kfree(dev_cfg_data);
accel_dev->cfg = NULL;
}
EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
struct adf_cfg_section *sec)
{
list_add_tail(&new->list, &sec->param_head);
}
static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
{
struct list_head *head = &sec->param_head;
struct list_head *list_ptr, *tmp;
list_for_each_prev_safe(list_ptr, tmp, head) {
struct adf_cfg_key_val *ptr =
list_entry(list_ptr, struct adf_cfg_key_val, list);
if (strncmp(ptr->key, key, sizeof(ptr->key)))
continue;
list_del(list_ptr);
kfree(ptr);
break;
}
}
static void adf_cfg_keyval_del_all(struct list_head *head)
{
struct list_head *list_ptr, *tmp;
list_for_each_prev_safe(list_ptr, tmp, head) {
struct adf_cfg_key_val *ptr =
list_entry(list_ptr, struct adf_cfg_key_val, list);
list_del(list_ptr);
kfree(ptr);
}
}
static void adf_cfg_section_del_all(struct list_head *head)
{
struct adf_cfg_section *ptr;
struct list_head *list, *tmp;
list_for_each_prev_safe(list, tmp, head) {
ptr = list_entry(list, struct adf_cfg_section, list);
adf_cfg_keyval_del_all(&ptr->param_head);
list_del(list);
kfree(ptr);
}
}
static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
const char *key)
{
struct list_head *list;
list_for_each(list, &s->param_head) {
struct adf_cfg_key_val *ptr =
list_entry(list, struct adf_cfg_key_val, list);
if (!strcmp(ptr->key, key))
return ptr;
}
return NULL;
}
static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
const char *sec_name)
{
struct adf_cfg_device_data *cfg = accel_dev->cfg;
struct list_head *list;
list_for_each(list, &cfg->sec_list) {
struct adf_cfg_section *ptr =
list_entry(list, struct adf_cfg_section, list);
if (!strcmp(ptr->name, sec_name))
return ptr;
}
return NULL;
}
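/*
 * Look up @key_name in @sec_name and copy its value into @val, which must
 * be at least ADF_CFG_MAX_VAL_LEN_IN_BYTES bytes long.
 */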
static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
const char *sec_name,
const char *key_name,
char *val)
{
struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
struct adf_cfg_key_val *keyval = NULL;
if (sec)
keyval = adf_cfg_key_value_find(sec, key_name);
if (keyval) {
memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
return 0;
}
return -ENODATA;
}
/**
* adf_cfg_add_key_value_param() - Add key-value config entry to config table.
* @accel_dev: Pointer to acceleration device.
* @section_name: Name of the section where the param will be added
* @key: The key string
* @val: Value for the given @key
* @type: Type - string, int or address
*
* Function adds configuration key - value entry in the appropriate section
* in the given acceleration device. If the key exists already, the value
* is updated.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
const char *section_name,
const char *key, const void *val,
enum adf_cfg_val_type type)
{
struct adf_cfg_device_data *cfg = accel_dev->cfg;
struct adf_cfg_key_val *key_val;
struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
section_name);
char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
if (!section)
return -EFAULT;
key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
if (!key_val)
return -ENOMEM;
INIT_LIST_HEAD(&key_val->list);
strscpy(key_val->key, key, sizeof(key_val->key));
if (type == ADF_DEC) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"%ld", (*((long *)val)));
} else if (type == ADF_STR) {
strscpy(key_val->val, (char *)val, sizeof(key_val->val));
} else if (type == ADF_HEX) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"0x%lx", (unsigned long)val);
} else {
dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
kfree(key_val);
return -EINVAL;
}
key_val->type = type;
/* Add the key-value pair as below policy:
* 1. if the key doesn't exist, add it;
* 2. if the key already exists with a different value then update it
* to the new value (the key is deleted and the newly created
* key_val containing the new value is added to the database);
* 3. if the key exists with the same value, then return without doing
* anything (the newly created key_val is freed).
*/
if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
adf_cfg_keyval_remove(key, section);
} else {
kfree(key_val);
return 0;
}
}
down_write(&cfg->lock);
adf_cfg_keyval_add(key_val, section);
up_write(&cfg->lock);
return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
/**
* adf_cfg_section_add() - Add config section entry to config table.
* @accel_dev: Pointer to acceleration device.
* @name: Name of the section
*
* Function adds configuration section where key - value entries
* will be stored.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
{
struct adf_cfg_device_data *cfg = accel_dev->cfg;
struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
if (sec)
return 0;
sec = kzalloc(sizeof(*sec), GFP_KERNEL);
if (!sec)
return -ENOMEM;
strscpy(sec->name, name, sizeof(sec->name));
INIT_LIST_HEAD(&sec->param_head);
down_write(&cfg->lock);
list_add_tail(&sec->list, &cfg->sec_list);
up_write(&cfg->lock);
return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_section_add);
int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
const char *section, const char *name,
char *value)
{
struct adf_cfg_device_data *cfg = accel_dev->cfg;
int ret;
down_read(&cfg->lock);
ret = adf_cfg_key_val_get(accel_dev, section, name, value);
up_read(&cfg->lock);
return ret;
}
EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_cfg.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"
#define BAD_REGADDR 0xffff
#define MAX_RETRY_TIMES 10000
#define INIT_CTX_ARB_VALUE 0x0
#define INIT_CTX_ENABLE_VALUE 0x0
#define INIT_PC_VALUE 0x0
#define INIT_WAKEUP_EVENTS_VALUE 0x1
#define INIT_SIG_EVENTS_VALUE 0x1
#define INIT_CCENABLE_VALUE 0x2000
#define RST_CSR_QAT_LSB 20
#define RST_CSR_AE_LSB 0
#define MC_TIMESTAMP_ENABLE (0x1 << 7)
#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
(~(1 << CE_REG_PAR_ERR_BITPOS)))
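/* Patch the immediate constant encoded in a GPRA/GPRB immed microword */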
#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
(inst = ((inst & 0xFFFF00C03FFull) | \
((((const_val) << 12) & 0x0FF00000ull) | \
(((const_val) << 10) & 0x0003FC00ull))))
#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
(inst = ((inst & 0xFFFF00FFF00ull) | \
((((const_val) << 12) & 0x0FF00000ull) | \
(((const_val) << 0) & 0x000000FFull))))
#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
static const u64 inst_4b[] = {
0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0A021000000ull
};
static const u64 inst[] = {
0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};
void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask)
{
AE(handle, ae).live_ctx_mask = ctx_mask;
}
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
int value;
do {
value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int csr,
unsigned int value)
{
unsigned int iterations = CSR_RETRY_TIMES;
do {
SET_AE_CSR(handle, ae, csr, value);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
return 0;
} while (iterations--);
pr_err("QAT: Write CSR Timeout\n");
return -EFAULT;
}
static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
unsigned int *events)
{
unsigned int cur_ctx;
cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
*events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int cycles,
int chk_inactive)
{
unsigned int base_cnt = 0, cur_cnt = 0;
unsigned int csr = (1 << ACS_ABO_BITPOS);
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
if (elapsed_cycles < 0)
elapsed_cycles += 0x10000;
/* ensure at least 8 cycles have elapsed in wait_cycles */
if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
return 0;
}
if (times < 0) {
pr_err("QAT: wait_num_cycles time out\n");
return -EFAULT;
}
return 0;
}
#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char mode)
{
unsigned int csr, new_csr;
if (mode != 4 && mode != 8) {
pr_err("QAT: bad ctx mode=%d\n", mode);
return -EINVAL;
}
/* Sets the acceleration engine context mode to either four or eight */
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
return 0;
}
int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char mode)
{
unsigned int csr, new_csr;
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
SET_BIT(csr, CE_NN_MODE_BITPOS) :
CLR_BIT(csr, CE_NN_MODE_BITPOS);
if (new_csr != csr)
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
return 0;
}
int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, enum icp_qat_uof_regtype lm_type,
unsigned char mode)
{
unsigned int csr, new_csr;
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
new_csr = (mode) ?
SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
break;
case ICP_LMEM1:
new_csr = (mode) ?
SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
break;
case ICP_LMEM2:
new_csr = (mode) ?
SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
break;
case ICP_LMEM3:
new_csr = (mode) ?
SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
break;
default:
pr_err("QAT: lmType = 0x%x\n", lm_type);
return -EINVAL;
}
if (new_csr != csr)
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
return 0;
}
void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char mode)
{
unsigned int csr, new_csr;
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
if (new_csr != csr)
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
}
static unsigned short qat_hal_get_reg_addr(unsigned int type,
unsigned short reg_num)
{
unsigned short reg_addr;
switch (type) {
case ICP_GPA_ABS:
case ICP_GPB_ABS:
reg_addr = 0x80 | (reg_num & 0x7f);
break;
case ICP_GPA_REL:
case ICP_GPB_REL:
reg_addr = reg_num & 0x1f;
break;
case ICP_SR_RD_REL:
case ICP_SR_WR_REL:
case ICP_SR_REL:
reg_addr = 0x180 | (reg_num & 0x1f);
break;
case ICP_SR_ABS:
reg_addr = 0x140 | ((reg_num & 0x3) << 1);
break;
case ICP_DR_RD_REL:
case ICP_DR_WR_REL:
case ICP_DR_REL:
reg_addr = 0x1c0 | (reg_num & 0x1f);
break;
case ICP_DR_ABS:
reg_addr = 0x100 | ((reg_num & 0x3) << 1);
break;
case ICP_NEIGH_REL:
reg_addr = 0x280 | (reg_num & 0x1f);
break;
case ICP_LMEM0:
reg_addr = 0x200;
break;
case ICP_LMEM1:
reg_addr = 0x220;
break;
case ICP_LMEM2:
reg_addr = 0x2c0;
break;
case ICP_LMEM3:
reg_addr = 0x2e0;
break;
case ICP_NO_DEST:
reg_addr = 0x300 | (reg_num & 0xff);
break;
default:
reg_addr = BAD_REGADDR;
break;
}
return reg_addr;
}
void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
unsigned int reset_mask = handle->chip_info->icp_rst_mask;
unsigned int reset_csr = handle->chip_info->icp_rst_csr;
unsigned int csr_val;
csr_val = GET_CAP_CSR(handle, reset_csr);
csr_val |= reset_mask;
SET_CAP_CSR(handle, reset_csr, csr_val);
}
static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask,
unsigned int ae_csr, unsigned int csr_val)
{
unsigned int ctx, cur_ctx;
cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
}
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
unsigned int ae_csr)
{
unsigned int cur_ctx, csr_val;
cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask,
unsigned int events)
{
unsigned int ctx, cur_ctx;
cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
}
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask,
unsigned int events)
{
unsigned int ctx, cur_ctx;
cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
events);
}
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned int base_cnt, cur_cnt;
unsigned char ae;
int times = MAX_RETRY_TIMES;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
if (times < 0) {
pr_err("QAT: AE%d is inactive!!\n", ae);
return -EFAULT;
}
}
return 0;
}
int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
unsigned int ae)
{
unsigned int enable = 0, active = 0;
enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
else
return 0;
}
static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned int misc_ctl_csr, misc_ctl;
unsigned char ae;
misc_ctl_csr = handle->chip_info->misc_ctl_csr;
/* stop the timestamp timers */
misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
if (misc_ctl & MC_TIMESTAMP_ENABLE)
SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
(~MC_TIMESTAMP_ENABLE));
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
}
/* start timestamp timers */
SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
}
#define ESRAM_AUTO_TINIT BIT(2)
#define ESRAM_AUTO_TINIT_DONE BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
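/* Trigger eSRAM auto-initialization on DH895xCC devices and poll until it completes */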
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
void __iomem *csr_addr =
(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
ESRAM_AUTO_INIT_CSR_OFFSET);
unsigned int csr_val;
int times = 30;
if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
return 0;
csr_val = ADF_CSR_RD(csr_addr, 0);
if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
return 0;
csr_val = ADF_CSR_RD(csr_addr, 0);
csr_val |= ESRAM_AUTO_TINIT;
ADF_CSR_WR(csr_addr, 0, csr_val);
do {
qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
csr_val = ADF_CSR_RD(csr_addr, 0);
} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
if (times < 0) {
pr_err("QAT: Fail to init eSram!\n");
return -EFAULT;
}
return 0;
}
#define SHRAM_INIT_CYCLES 2060
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
unsigned int reset_mask = handle->chip_info->icp_rst_mask;
unsigned int reset_csr = handle->chip_info->icp_rst_csr;
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned char ae = 0;
unsigned int times = 100;
unsigned int csr_val;
/* write to the reset csr */
csr_val = GET_CAP_CSR(handle, reset_csr);
csr_val &= ~reset_mask;
do {
SET_CAP_CSR(handle, reset_csr, csr_val);
if (!(times--))
goto out_err;
csr_val = GET_CAP_CSR(handle, reset_csr);
csr_val &= reset_mask;
} while (csr_val);
/* enable clock */
csr_val = GET_CAP_CSR(handle, clk_csr);
csr_val |= reset_mask;
SET_CAP_CSR(handle, clk_csr, csr_val);
if (qat_hal_check_ae_alive(handle))
goto out_err;
/* Set undefined power-up/reset states to reasonable default values */
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
INIT_CTX_ENABLE_VALUE);
qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
qat_hal_put_wakeup_event(handle, ae,
ICP_QAT_UCLO_AE_ALL_CTX,
INIT_WAKEUP_EVENTS_VALUE);
qat_hal_put_sig_event(handle, ae,
ICP_QAT_UCLO_AE_ALL_CTX,
INIT_SIG_EVENTS_VALUE);
}
if (qat_hal_init_esram(handle))
goto out_err;
if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
goto out_err;
qat_hal_reset_timestamp(handle);
return 0;
out_err:
pr_err("QAT: failed to get device out of reset\n");
return -EFAULT;
}
static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask)
{
unsigned int ctx;
ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}
static u64 qat_hal_parity_64bit(u64 word)
{
word ^= word >> 1;
word ^= word >> 2;
word ^= word >> 4;
word ^= word >> 8;
word ^= word >> 16;
word ^= word >> 32;
return word & 1;
}
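/* Recompute the seven ECC bits (bits 44-50) of a microword from fixed parity masks */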
static u64 qat_hal_set_uword_ecc(u64 uword)
{
u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
bit6_mask = 0xdaf69a46910ULL;
/* clear the ecc bits */
uword &= ~(0x7fULL << 0x2C);
uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
return uword;
}
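/* Write a block of microwords into the AE control store, regenerating the ECC bits for each word */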
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
unsigned int words_num, u64 *uword)
{
unsigned int ustore_addr;
unsigned int i;
ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
unsigned int uwrd_lo, uwrd_hi;
u64 tmp;
tmp = qat_hal_set_uword_ecc(uword[i]);
uwrd_lo = (unsigned int)(tmp & 0xffffffff);
uwrd_hi = (unsigned int)(tmp >> 0x20);
qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
}
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask)
{
unsigned int ctx;
ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}
static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned char ae;
unsigned short reg;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
reg, 0);
qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
reg, 0);
}
}
}
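/*
 * Zero the GPRs of every enabled AE by loading a small microcode routine
 * into each AE, running it on all contexts and then restoring the reset
 * state of the context CSRs.
 */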
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned char ae;
unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
int times = MAX_RETRY_TIMES;
unsigned int csr_val = 0;
unsigned int savctx = 0;
int ret = 0;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
if (handle->chip_info->nn)
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
(u64 *)inst);
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
CTX_SIG_EVENTS_INDIRECT, 0);
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
qat_hal_enable_ctx(handle, ae, ctx_mask);
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
/* wait for AE to finish */
do {
ret = qat_hal_wait_cycles(handle, ae, 20, 1);
} while (ret && times--);
if (times < 0) {
pr_err("QAT: clear GPR of AE %d failed", ae);
return -EINVAL;
}
qat_hal_disable_ctx(handle, ae, ctx_mask);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
savctx & ACS_ACNO);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
INIT_CTX_ENABLE_VALUE);
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
qat_hal_put_wakeup_event(handle, ae, ctx_mask,
INIT_WAKEUP_EVENTS_VALUE);
qat_hal_put_sig_event(handle, ae, ctx_mask,
INIT_SIG_EVENTS_VALUE);
}
return 0;
}
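/* Fill in chip-specific CSR offsets, masks and capability flags for the detected PCI device */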
static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned int max_en_ae_id = 0;
struct adf_bar *sram_bar;
unsigned int csr_val = 0;
unsigned long ae_mask;
unsigned char ae = 0;
int ret = 0;
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
handle->chip_info->icp_rst_mask = 0x100015;
handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
handle->chip_info->fcu_loaded_ae_pos = 0;
handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
break;
case PCI_DEVICE_ID_INTEL_QAT_C62X:
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = true;
handle->chip_info->lm2lm3 = false;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
handle->chip_info->icp_rst_csr = ICP_RESET;
handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
(hw_data->accel_mask << RST_CSR_QAT_LSB);
handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
handle->chip_info->misc_ctl_csr = MISC_CONTROL;
handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = false;
handle->chip_info->tgroup_share_ustore = false;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
handle->chip_info->fcu_sts_csr = FCU_STATUS;
handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
break;
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
handle->chip_info->mmp_sram_size = 0x40000;
handle->chip_info->nn = true;
handle->chip_info->lm2lm3 = false;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
handle->chip_info->icp_rst_csr = ICP_RESET;
handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
(hw_data->accel_mask << RST_CSR_QAT_LSB);
handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
handle->chip_info->misc_ctl_csr = MISC_CONTROL;
handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
handle->chip_info->fw_auth = false;
handle->chip_info->css_3k = false;
handle->chip_info->tgroup_share_ustore = false;
handle->chip_info->fcu_ctl_csr = 0;
handle->chip_info->fcu_sts_csr = 0;
handle->chip_info->fcu_dram_addr_hi = 0;
handle->chip_info->fcu_dram_addr_lo = 0;
handle->chip_info->fcu_loaded_ae_csr = 0;
handle->chip_info->fcu_loaded_ae_pos = 0;
handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
break;
default:
ret = -EINVAL;
goto out_err;
}
if (handle->chip_info->mmp_sram_size > 0) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
}
handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
handle->hal_handle->ae_mask = hw_data->ae_mask;
handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
handle->hal_handle->slice_mask = hw_data->accel_mask;
handle->cfg_ae_mask = ALL_AE_MASK;
/* create AE objects */
handle->hal_handle->upc_mask = 0x1ffff;
handle->hal_handle->max_ustore = 0x4000;
ae_mask = handle->hal_handle->ae_mask;
for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
handle->hal_handle->aes[ae].free_addr = 0;
handle->hal_handle->aes[ae].free_size =
handle->hal_handle->max_ustore;
handle->hal_handle->aes[ae].ustore_size =
handle->hal_handle->max_ustore;
handle->hal_handle->aes[ae].live_ctx_mask =
ICP_QAT_UCLO_AE_ALL_CTX;
max_en_ae_id = ae;
}
handle->hal_handle->ae_max_num = max_en_ae_id + 1;
/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
out_err:
return ret;
}
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
struct icp_qat_fw_loader_handle *handle;
int ret = 0;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle) {
ret = -ENOMEM;
goto out_hal_handle;
}
handle->chip_info = kzalloc(sizeof(*handle->chip_info), GFP_KERNEL);
if (!handle->chip_info) {
ret = -ENOMEM;
goto out_chip_info;
}
ret = qat_hal_chip_init(handle, accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
goto out_err;
}
/* take all AEs out of reset */
ret = qat_hal_clr_reset(handle);
if (ret) {
dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
goto out_err;
}
qat_hal_clear_xfer(handle);
if (!handle->chip_info->fw_auth) {
ret = qat_hal_clear_gpr(handle);
if (ret)
goto out_err;
}
accel_dev->fw_loader->fw_loader = handle;
return 0;
out_err:
kfree(handle->chip_info);
out_chip_info:
kfree(handle->hal_handle);
out_hal_handle:
kfree(handle);
return ret;
}
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
{
if (!handle)
return;
kfree(handle->chip_info);
kfree(handle->hal_handle);
kfree(handle);
}
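/*
 * Start the acceleration engines: on devices with firmware authentication
 * issue an FCU start command, otherwise enable all contexts directly.
 * Returns the number of AEs started, or 0 on failure.
 */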
int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
u32 wakeup_val = handle->chip_info->wakeup_event_val;
u32 fcu_ctl_csr, fcu_sts_csr;
unsigned int fcu_sts;
unsigned char ae;
u32 ae_ctr = 0;
int retry = 0;
if (handle->chip_info->fw_auth) {
fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
ae_ctr = hweight32(ae_mask);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
return ae_ctr;
} while (retry++ < FW_AUTH_MAX_RETRY);
pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
return 0;
} else {
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
ae_ctr++;
}
return ae_ctr;
}
}
void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
unsigned int ctx_mask)
{
if (!handle->chip_info->fw_auth)
qat_hal_disable_ctx(handle, ae, ctx_mask);
}
void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask, unsigned int upc)
{
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & upc);
}
static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
unsigned int words_num, u64 *uword)
{
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
unsigned int words_num, unsigned int *data)
{
unsigned int i, ustore_addr;
ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
unsigned int uwrd_lo, uwrd_hi, tmp;
uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
((data[i] & 0xff00) << 2) |
(0x3 << 8) | (data[i] & 0xff);
uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
tmp = ((data[i] >> 0x10) & 0xffff);
uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
}
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
#define MAX_EXEC_INST 100
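/*
 * Temporarily replace the start of the AE control store with @micro_inst,
 * execute it on context @ctx and then restore the saved microwords and
 * context CSRs.
 */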
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
u64 *micro_inst, unsigned int inst_num,
int code_off, unsigned int max_cycle,
unsigned int *endpc)
{
unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
unsigned int ind_t_index = 0, ind_t_index_byte = 0;
unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
u64 savuwords[MAX_EXEC_INST];
unsigned int ind_cnt_sig;
unsigned int ind_sig, act_sig;
unsigned int csr_val = 0, newcsr_val;
unsigned int savctx;
unsigned int savcc, wakeup_events, savpc;
unsigned int ctxarb_ctl, ctx_enables;
if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
pr_err("QAT: invalid instruction num %d\n", inst_num);
return -EINVAL;
}
/* save current context */
ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
INDIRECT_LM_ADDR_0_BYTE_INDEX);
ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (handle->chip_info->lm2lm3) {
ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
LM_ADDR_2_INDIRECT);
ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
LM_ADDR_3_INDIRECT);
ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
INDIRECT_LM_ADDR_2_BYTE_INDEX);
ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
INDIRECT_LM_ADDR_3_BYTE_INDEX);
ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
INDIRECT_T_INDEX);
ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
INDIRECT_T_INDEX_BYTE_INDEX);
}
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
FUTURE_COUNT_SIGNAL_INDIRECT);
ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
CTX_SIG_EVENTS_INDIRECT);
act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
if (code_off)
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
qat_hal_enable_ctx(handle, ae, (1 << ctx));
/* wait for micro codes to finish */
if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
return -EFAULT;
if (endpc) {
unsigned int ctx_status;
ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* restore to saved context */
qat_hal_disable_ctx(handle, ae, (1 << ctx));
if (inst_num <= MAX_EXEC_INST)
qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
LM_ADDR_0_INDIRECT, ind_lm_addr0);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
LM_ADDR_1_INDIRECT, ind_lm_addr1);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
if (handle->chip_info->lm2lm3) {
qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
ind_lm_addr2);
qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
ind_lm_addr3);
qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
INDIRECT_LM_ADDR_2_BYTE_INDEX,
ind_lm_addr_byte2);
qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
INDIRECT_LM_ADDR_3_BYTE_INDEX,
ind_lm_addr_byte3);
qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
INDIRECT_T_INDEX, ind_t_index);
qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
INDIRECT_T_INDEX_BYTE_INDEX,
ind_t_index_byte);
}
qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
CTX_SIG_EVENTS_INDIRECT, ind_sig);
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
return 0;
}
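/*
 * Read a relative GPR/transfer register by executing a single ALU
 * microword on the target context and sampling the ALU_OUT CSR.
 */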
static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int *data)
{
unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
unsigned short reg_addr;
int status = 0;
u64 insts, savuword;
reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
if (reg_addr == BAD_REGADDR) {
pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
return -EINVAL;
}
switch (reg_type) {
case ICP_GPA_REL:
insts = 0xA070000000ull | (reg_addr & 0x3ff);
break;
default:
insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
uwrd_lo = (unsigned int)(insts & 0xffffffff);
uwrd_hi = (unsigned int)(insts >> 0x20);
qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
/* delay for at least 8 cycles */
qat_hal_wait_cycles(handle, ae, 0x8, 0);
/*
* read ALU output
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
*data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
savctx & ACS_ACNO);
qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
return status;
}
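/*
 * Write a 32-bit value to a context-relative register by executing a short
 * inline micro program: two immediate uwords load the high and low 16-bit
 * halves into the destination register, encoded differently for GPR A
 * destinations than for the other register types.
 */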
static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int data)
{
unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
u64 insts[] = {
0x0F440000000ull,
0x0F040000000ull,
0x0F0000C0300ull,
0x0E000010000ull
};
const int num_inst = ARRAY_SIZE(insts), code_off = 1;
const int imm_w1 = 0, imm_w0 = 1;
dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
if (dest_addr == BAD_REGADDR) {
pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
return -EINVAL;
}
data16lo = 0xffff & data;
data16hi = 0xffff & (data >> 0x10);
src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
(0xff & data16hi));
src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
(0xff & data16lo));
switch (reg_type) {
case ICP_GPA_REL:
insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
break;
default:
insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
break;
}
return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
code_off, num_inst * 0x5, NULL);
}
int qat_hal_get_ins_num(void)
{
return ARRAY_SIZE(inst_4b);
}
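/*
 * Append the canned inst_4b template to micro_inst at offset inst_num and
 * patch its immediates with the target local-memory address and the 32-bit
 * value split into two 16-bit halves. Returns the number of micro words
 * appended; qat_hal_get_ins_num() above reports the same per-entry cost.
 */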
static int qat_hal_concat_micro_code(u64 *micro_inst,
unsigned int inst_num, unsigned int size,
unsigned int addr, unsigned int *value)
{
int i;
unsigned int cur_value;
const u64 *inst_arr;
int fixup_offset;
int usize = 0;
int orig_num;
orig_num = inst_num;
cur_value = value[0];
inst_arr = inst_4b;
usize = ARRAY_SIZE(inst_4b);
fixup_offset = inst_num;
for (i = 0; i < usize; i++)
micro_inst[inst_num++] = inst_arr[i];
INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
fixup_offset++;
INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
fixup_offset++;
INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
fixup_offset++;
INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
return inst_num - orig_num;
}
static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
int *pfirst_exec, u64 *micro_inst,
unsigned int inst_num)
{
int stat = 0;
unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
unsigned int gprb0 = 0, gprb1 = 0;
if (*pfirst_exec) {
qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
*pfirst_exec = 0;
}
stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
inst_num * 0x5, NULL);
if (stat != 0)
return -EFAULT;
qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
return 0;
}
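/*
 * Flatten a batch-init list into one micro-instruction array (capped at the
 * ustore size), append a terminating uword and run it on context 0 of the
 * AE via qat_hal_exec_micro_init_lm() to initialise local memory.
 */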
int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
unsigned char ae,
struct icp_qat_uof_batch_init *lm_init_header)
{
struct icp_qat_uof_batch_init *plm_init;
u64 *micro_inst_arry;
int micro_inst_num;
int alloc_inst_size;
int first_exec = 1;
int stat = 0;
plm_init = lm_init_header->next;
alloc_inst_size = lm_init_header->size;
if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
alloc_inst_size = handle->hal_handle->max_ustore;
micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
GFP_KERNEL);
if (!micro_inst_arry)
return -ENOMEM;
micro_inst_num = 0;
while (plm_init) {
unsigned int addr, *value, size;
ae = plm_init->ae;
addr = plm_init->addr;
value = plm_init->value;
size = plm_init->size;
micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
micro_inst_num,
size, addr, value);
plm_init = plm_init->next;
}
/* exec micro codes */
if (micro_inst_arry && micro_inst_num > 0) {
micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
micro_inst_arry,
micro_inst_num);
}
kfree(micro_inst_arry);
return stat;
}
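/*
 * Write a value into an AE read-transfer register directly through the
 * SET_AE_XFER CSR path. In 4-context mode only even contexts are legal and
 * the per-context index range and DR offset are doubled.
 */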
static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int val)
{
int status = 0;
unsigned int reg_addr;
unsigned int ctx_enables;
unsigned short mask;
unsigned short dr_offset = 0x10;
ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
return -EINVAL;
}
mask = 0x1f;
dr_offset = 0x20;
} else {
mask = 0x0f;
}
if (reg_num & ~mask)
return -EINVAL;
reg_addr = reg_num + (ctx << 0x5);
switch (reg_type) {
case ICP_SR_RD_REL:
case ICP_SR_REL:
SET_AE_XFER(handle, ae, reg_addr, val);
break;
case ICP_DR_RD_REL:
case ICP_DR_REL:
SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
break;
default:
status = -EINVAL;
break;
}
return status;
}
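/*
 * Write a value into an AE write-transfer register. The value is staged
 * through GPB register 0 by a small inline micro program (two immediates
 * plus a copy to the transfer register); the clobbered GPR is read first
 * and restored once the program has run.
 */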
static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int data)
{
unsigned int gprval, ctx_enables;
unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
data16low;
unsigned short reg_mask;
int status = 0;
u64 micro_inst[] = {
0x0F440000000ull,
0x0F040000000ull,
0x0A000000000ull,
0x0F0000C0300ull,
0x0E000010000ull
};
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
return -EINVAL;
}
reg_mask = (unsigned short)~0x1f;
} else {
reg_mask = (unsigned short)~0xf;
}
if (reg_num & reg_mask)
return -EINVAL;
xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
if (xfr_addr == BAD_REGADDR) {
pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
return -EINVAL;
}
status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
if (status) {
pr_err("QAT: failed to read register");
return status;
}
gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
data16low = 0xffff & data;
data16hi = 0xffff & (data >> 0x10);
src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
(unsigned short)(0xff & data16hi));
src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
(unsigned short)(0xff & data16low));
micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
micro_inst[0x2] = micro_inst[0x2] |
((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
code_off, dly, NULL);
qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
return status;
}
static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
unsigned short nn, unsigned int val)
{
unsigned int ctx_enables;
int stat = 0;
ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
return stat;
}
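/*
 * Convert an absolute register number into a (relative register, context)
 * pair: 5-bit index and even contexts in 4-context mode, 4-bit index and
 * all eight contexts in 8-context mode.
 */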
static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
*handle, unsigned char ae,
unsigned short absreg_num,
unsigned short *relreg,
unsigned char *ctx)
{
unsigned int ctx_enables;
ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
*ctx = (absreg_num >> 0x4) & 0x6;
} else {
/* 8-ctx mode */
*relreg = absreg_num & 0x0F;
*ctx = (absreg_num >> 0x4) & 0x7;
}
return 0;
}
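/*
 * Initialise a GPR on the given AE. A zero ctx_mask means the register
 * number is absolute: it is converted to a relative register/context pair
 * and the register type is decremented, which appears to map each *_ABS
 * enum value onto its *_REL counterpart. Otherwise the value is written to
 * every context set in ctx_mask. qat_hal_init_wr_xfer() and
 * qat_hal_init_rd_xfer() below follow the same pattern with a type offset
 * of 3.
 */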
int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned long ctx_mask,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int regdata)
{
int stat = 0;
unsigned short reg;
unsigned char ctx = 0;
enum icp_qat_uof_regtype type;
if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
return -EINVAL;
do {
if (ctx_mask == 0) {
qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®,
&ctx);
type = reg_type - 1;
} else {
reg = reg_num;
type = reg_type;
if (!test_bit(ctx, &ctx_mask))
continue;
}
stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
if (stat) {
pr_err("QAT: write gpr fail\n");
return -EINVAL;
}
} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
return 0;
}
int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned long ctx_mask,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int regdata)
{
int stat = 0;
unsigned short reg;
unsigned char ctx = 0;
enum icp_qat_uof_regtype type;
if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
return -EINVAL;
do {
if (ctx_mask == 0) {
qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®,
&ctx);
type = reg_type - 3;
} else {
reg = reg_num;
type = reg_type;
if (!test_bit(ctx, &ctx_mask))
continue;
}
stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
regdata);
if (stat) {
pr_err("QAT: write wr xfer fail\n");
return -EINVAL;
}
} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
return 0;
}
int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned long ctx_mask,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_num, unsigned int regdata)
{
int stat = 0;
unsigned short reg;
unsigned char ctx = 0;
enum icp_qat_uof_regtype type;
if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
return -EINVAL;
do {
if (ctx_mask == 0) {
qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®,
&ctx);
type = reg_type - 3;
} else {
reg = reg_num;
type = reg_type;
if (!test_bit(ctx, &ctx_mask))
continue;
}
stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
regdata);
if (stat) {
pr_err("QAT: write rd xfer fail\n");
return -EINVAL;
}
} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
return 0;
}
int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned long ctx_mask,
unsigned short reg_num, unsigned int regdata)
{
int stat = 0;
unsigned char ctx;
if (!handle->chip_info->nn) {
dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
handle->pci_dev->device);
return -EINVAL;
}
if (ctx_mask == 0)
return -EINVAL;
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!test_bit(ctx, &ctx_mask))
continue;
stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
if (stat) {
pr_err("QAT: write neigh error\n");
return -EINVAL;
}
}
return 0;
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_hal.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <crypto/algapi.h>
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"
#define ADF_MAX_RETRIES 20
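/*
 * Push a firmware request onto the TX ring, retrying up to ADF_MAX_RETRIES
 * times while the ring reports -EAGAIN. Returns -EINPROGRESS once queued,
 * or -ENOSPC if the ring is still full after the last retry.
 */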
static int qat_alg_send_message_retry(struct qat_alg_req *req)
{
int ret = 0, ctr = 0;
do {
ret = adf_send_message(req->tx_ring, req->fw_req);
} while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
if (ret == -EAGAIN)
return -ENOSPC;
return -EINPROGRESS;
}
void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
{
struct qat_alg_req *req, *tmp;
spin_lock_bh(&backlog->lock);
list_for_each_entry_safe(req, tmp, &backlog->list, list) {
if (adf_send_message(req->tx_ring, req->fw_req)) {
/* The HW ring is full. Do nothing.
* qat_alg_send_backlog() will be invoked again by
* another callback.
*/
break;
}
list_del(&req->list);
crypto_request_complete(req->base, -EINPROGRESS);
}
spin_unlock_bh(&backlog->lock);
}
static void qat_alg_backlog_req(struct qat_alg_req *req,
struct qat_instance_backlog *backlog)
{
INIT_LIST_HEAD(&req->list);
spin_lock_bh(&backlog->lock);
list_add_tail(&req->list, &backlog->list);
spin_unlock_bh(&backlog->lock);
}
static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
{
struct qat_instance_backlog *backlog = req->backlog;
struct adf_etr_ring_data *tx_ring = req->tx_ring;
u32 *fw_req = req->fw_req;
/* If any request is already backlogged, then add to backlog list */
if (!list_empty(&backlog->list))
goto enqueue;
/* If ring is nearly full, then add to backlog list */
if (adf_ring_nearly_full(tx_ring))
goto enqueue;
/* If adding request to HW ring fails, then add to backlog list */
if (adf_send_message(tx_ring, fw_req))
goto enqueue;
return -EINPROGRESS;
enqueue:
qat_alg_backlog_req(req, backlog);
return -EBUSY;
}
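/*
 * Dispatch a firmware request: requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG
 * take the backlog path, everything else the bounded retry path.
 *
 * Illustrative caller sketch (the instance fields are hypothetical; only
 * the qat_alg_req members are taken from this file):
 *
 *	req->fw_req  = (u32 *)&fw_msg;
 *	req->tx_ring = inst->sym_tx;
 *	req->base    = &areq->base;
 *	req->backlog = &inst->backlog;
 *	ret = qat_alg_send_message(req);
 *	(-EINPROGRESS: queued, -EBUSY: backlogged, -ENOSPC: ring full)
 */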
int qat_alg_send_message(struct qat_alg_req *req)
{
u32 flags = req->base->flags;
if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
return qat_alg_send_message_maybacklog(req);
else
return qat_alg_send_message_retry(req);
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_algs_send.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"
#define UWORD_CPYBUF_SIZE 1024U
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
unsigned int ae, unsigned int image_num)
{
struct icp_qat_uclo_aedata *ae_data;
struct icp_qat_uclo_encapme *encap_image;
struct icp_qat_uclo_page *page = NULL;
struct icp_qat_uclo_aeslice *ae_slice = NULL;
ae_data = &obj_handle->ae_data[ae];
encap_image = &obj_handle->ae_uimage[image_num];
ae_slice = &ae_data->ae_slices[ae_data->slice_num];
ae_slice->encap_image = encap_image;
if (encap_image->img_ptr) {
ae_slice->ctx_mask_assigned =
encap_image->img_ptr->ctx_assigned;
ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
} else {
ae_slice->ctx_mask_assigned = 0;
}
ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
if (!ae_slice->region)
return -ENOMEM;
ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
if (!ae_slice->page)
goto out_err;
page = ae_slice->page;
page->encap_page = encap_image->page;
ae_slice->page->region = ae_slice->region;
ae_data->slice_num++;
return 0;
out_err:
kfree(ae_slice->region);
ae_slice->region = NULL;
return -ENOMEM;
}
static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
unsigned int i;
if (!ae_data) {
pr_err("QAT: bad argument, ae_data is NULL\n ");
return -EINVAL;
}
for (i = 0; i < ae_data->slice_num; i++) {
kfree(ae_data->ae_slices[i].region);
ae_data->ae_slices[i].region = NULL;
kfree(ae_data->ae_slices[i].page);
ae_data->ae_slices[i].page = NULL;
}
return 0;
}
static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
unsigned int str_offset)
{
if (!str_table->table_len || str_offset > str_table->table_len)
return NULL;
return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}
static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
int maj = hdr->maj_ver & 0xff;
int min = hdr->min_ver & 0xff;
if (hdr->file_id != ICP_QAT_UOF_FID) {
pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
return -EINVAL;
}
if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
maj, min);
return -EINVAL;
}
return 0;
}
static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
int maj = suof_hdr->maj_ver & 0xff;
int min = suof_hdr->min_ver & 0xff;
if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
return -EINVAL;
}
if (suof_hdr->fw_type != 0) {
pr_err("QAT: unsupported firmware type\n");
return -EINVAL;
}
if (suof_hdr->num_chunks <= 0x1) {
pr_err("QAT: SUOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
maj, min);
return -EINVAL;
}
return 0;
}
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
unsigned int addr, unsigned int *val,
unsigned int num_in_bytes)
{
unsigned int outval;
unsigned char *ptr = (unsigned char *)val;
while (num_in_bytes) {
memcpy(&outval, ptr, 4);
SRAM_WRITE(handle, addr, outval);
num_in_bytes -= 4;
ptr += 4;
addr += 4;
}
}
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int addr,
unsigned int *val,
unsigned int num_in_bytes)
{
unsigned int outval;
unsigned char *ptr = (unsigned char *)val;
addr >>= 0x2; /* convert to uword address */
while (num_in_bytes) {
memcpy(&outval, ptr, 4);
qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
num_in_bytes -= 4;
ptr += 4;
}
}
static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
unsigned char ae,
struct icp_qat_uof_batch_init
*umem_init_header)
{
struct icp_qat_uof_batch_init *umem_init;
if (!umem_init_header)
return;
umem_init = umem_init_header->next;
while (umem_init) {
unsigned int addr, *value, size;
ae = umem_init->ae;
addr = umem_init->addr;
value = umem_init->value;
size = umem_init->size;
qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
umem_init = umem_init->next;
}
}
static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_batch_init **base)
{
struct icp_qat_uof_batch_init *umem_init;
umem_init = *base;
while (umem_init) {
struct icp_qat_uof_batch_init *pre;
pre = umem_init;
umem_init = umem_init->next;
kfree(pre);
}
*base = NULL;
}
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
char buf[16] = {0};
unsigned long ae = 0;
int i;
strncpy(buf, str, 15);
for (i = 0; i < 16; i++) {
if (!isdigit(buf[i])) {
buf[i] = '\0';
break;
}
}
if ((kstrtoul(buf, 10, &ae)))
return -EFAULT;
*num = (unsigned int)ae;
return 0;
}
static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem,
unsigned int size_range, unsigned int *ae)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
char *str;
if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
pr_err("QAT: initmem is out of range");
return -EINVAL;
}
if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
pr_err("QAT: Memory scope for init_mem error\n");
return -EINVAL;
}
str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
if (!str) {
pr_err("QAT: AE name assigned in UOF init table is NULL\n");
return -EINVAL;
}
if (qat_uclo_parse_num(str, ae)) {
pr_err("QAT: Parse num for AE number failed\n");
return -EINVAL;
}
if (*ae >= ICP_QAT_UCLO_MAX_AE) {
pr_err("QAT: ae %d out of range\n", *ae);
return -EINVAL;
}
return 0;
}
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
*handle, struct icp_qat_uof_initmem
*init_mem, unsigned int ae,
struct icp_qat_uof_batch_init
**init_tab_base)
{
struct icp_qat_uof_batch_init *init_header, *tail;
struct icp_qat_uof_batch_init *mem_init, *tail_old;
struct icp_qat_uof_memvar_attr *mem_val_attr;
unsigned int i, flag = 0;
mem_val_attr =
(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
sizeof(struct icp_qat_uof_initmem));
init_header = *init_tab_base;
if (!init_header) {
init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
if (!init_header)
return -ENOMEM;
init_header->size = 1;
*init_tab_base = init_header;
flag = 1;
}
tail_old = init_header;
while (tail_old->next)
tail_old = tail_old->next;
tail = tail_old;
for (i = 0; i < init_mem->val_attr_num; i++) {
mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
if (!mem_init)
goto out_err;
mem_init->ae = ae;
mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
mem_init->value = &mem_val_attr->value;
mem_init->size = 4;
mem_init->next = NULL;
tail->next = mem_init;
tail = mem_init;
init_header->size += qat_hal_get_ins_num();
mem_val_attr++;
}
return 0;
out_err:
/* Do not free the list head unless we allocated it. */
tail_old = tail_old->next;
if (flag) {
kfree(*init_tab_base);
*init_tab_base = NULL;
}
while (tail_old) {
mem_init = tail_old->next;
kfree(tail_old);
tail_old = mem_init;
}
return -ENOMEM;
}
static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int ae;
if (qat_uclo_fetch_initmem_ae(handle, init_mem,
handle->chip_info->lm_size, &ae))
return -EINVAL;
if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
&obj_handle->lm_init_tab[ae]))
return -EINVAL;
return 0;
}
static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int ae, ustore_size, uaddr, i;
struct icp_qat_uclo_aedata *aed;
ustore_size = obj_handle->ustore_phy_size;
if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
return -EINVAL;
if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
&obj_handle->umem_init_tab[ae]))
return -EINVAL;
/* set the highest ustore address referenced */
uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
aed = &obj_handle->ae_data[ae];
for (i = 0; i < aed->slice_num; i++) {
if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
aed->ae_slices[i].encap_image->uwords_num = uaddr;
}
return 0;
}
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
switch (init_mem->region) {
case ICP_QAT_UOF_LMEM_REGION:
if (qat_uclo_init_lmem_seg(handle, init_mem))
return -EINVAL;
break;
case ICP_QAT_UOF_UMEM_REGION:
if (qat_uclo_init_umem_seg(handle, init_mem))
return -EINVAL;
break;
default:
pr_err("QAT: initmem region error. region type=0x%x\n",
init_mem->region);
return -EINVAL;
}
return 0;
}
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uclo_encapme *image)
{
unsigned int i;
struct icp_qat_uclo_encap_page *page;
struct icp_qat_uof_image *uof_image;
unsigned char ae;
unsigned int ustore_size;
unsigned int patt_pos;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned long cfg_ae_mask = handle->cfg_ae_mask;
u64 *fill_data;
uof_image = image->img_ptr;
fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
GFP_KERNEL);
if (!fill_data)
return -ENOMEM;
for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
memcpy(&fill_data[i], &uof_image->fill_pattern,
sizeof(u64));
page = image->page;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
unsigned long ae_assigned = uof_image->ae_assigned;
if (!test_bit(ae, &ae_assigned))
continue;
if (!test_bit(ae, &cfg_ae_mask))
continue;
ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
patt_pos = page->beg_addr_p + page->micro_words_num;
qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
page->beg_addr_p, &fill_data[0]);
qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
ustore_size - patt_pos + 1,
&fill_data[page->beg_addr_p]);
}
kfree(fill_data);
return 0;
}
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
int i, ae;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
unsigned long ae_mask = handle->hal_handle->ae_mask;
for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
if (initmem->num_in_bytes) {
if (qat_uclo_init_ae_memory(handle, initmem))
return -EINVAL;
}
initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
(uintptr_t)initmem +
sizeof(struct icp_qat_uof_initmem)) +
(sizeof(struct icp_qat_uof_memvar_attr) *
initmem->val_attr_num));
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_batch_wr_lm(handle, ae,
obj_handle->lm_init_tab[ae])) {
pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
return -EINVAL;
}
qat_uclo_cleanup_batch_init_list(handle,
&obj_handle->lm_init_tab[ae]);
qat_uclo_batch_wr_umem(handle, ae,
obj_handle->umem_init_tab[ae]);
qat_uclo_cleanup_batch_init_list(handle,
&obj_handle->umem_init_tab[ae]);
}
return 0;
}
static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
char *chunk_id, void *cur)
{
int i;
struct icp_qat_uof_chunkhdr *chunk_hdr =
(struct icp_qat_uof_chunkhdr *)
((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
for (i = 0; i < obj_hdr->num_chunks; i++) {
if ((cur < (void *)&chunk_hdr[i]) &&
!strncmp(chunk_hdr[i].chunk_id, chunk_id,
ICP_QAT_UOF_OBJID_LEN)) {
return &chunk_hdr[i];
}
}
return NULL;
}
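/*
 * One step of a CRC-16 computation using the CCITT polynomial 0x1021: fold
 * the next byte into the 16-bit running register. Driven over whole buffers
 * by qat_uclo_calc_str_checksum() below.
 */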
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
int i;
unsigned int topbit = 1 << 0xF;
unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
reg ^= inbyte << 0x8;
for (i = 0; i < 0x8; i++) {
if (reg & topbit)
reg = (reg << 1) ^ 0x1021;
else
reg <<= 1;
}
return reg & 0xFFFF;
}
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
unsigned int chksum = 0;
if (ptr)
while (num--)
chksum = qat_uclo_calc_checksum(chksum, *ptr++);
return chksum;
}
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
char *chunk_id)
{
struct icp_qat_uof_filechunkhdr *file_chunk;
struct icp_qat_uclo_objhdr *obj_hdr;
char *chunk;
int i;
file_chunk = (struct icp_qat_uof_filechunkhdr *)
(buf + sizeof(struct icp_qat_uof_filehdr));
for (i = 0; i < file_hdr->num_chunks; i++) {
if (!strncmp(file_chunk->chunk_id, chunk_id,
ICP_QAT_UOF_OBJID_LEN)) {
chunk = buf + file_chunk->offset;
if (file_chunk->checksum != qat_uclo_calc_str_checksum(
chunk, file_chunk->size))
break;
obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
if (!obj_hdr)
break;
obj_hdr->file_buff = chunk;
obj_hdr->checksum = file_chunk->checksum;
obj_hdr->size = file_chunk->size;
return obj_hdr;
}
file_chunk++;
}
return NULL;
}
static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
struct icp_qat_uof_image *image)
{
struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
struct icp_qat_uof_objtable *neigh_reg_tab;
struct icp_qat_uof_code_page *code_page;
code_page = (struct icp_qat_uof_code_page *)
((char *)image + sizeof(struct icp_qat_uof_image));
uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
code_page->uc_var_tab_offset);
imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
code_page->imp_var_tab_offset);
imp_expr_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
pr_err("QAT: UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
pr_err("QAT: UOF can't contain neighbor register table\n");
return -EINVAL;
}
if (image->numpages > 1) {
pr_err("QAT: UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
pr_err("QAT: UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
pr_err("QAT: UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
}
static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
*encap_uof_obj,
struct icp_qat_uof_image *img,
struct icp_qat_uclo_encap_page *page)
{
struct icp_qat_uof_code_page *code_page;
struct icp_qat_uof_code_area *code_area;
struct icp_qat_uof_objtable *uword_block_tab;
struct icp_qat_uof_uword_block *uwblock;
int i;
code_page = (struct icp_qat_uof_code_page *)
((char *)img + sizeof(struct icp_qat_uof_image));
page->def_page = code_page->def_page;
page->page_region = code_page->page_region;
page->beg_addr_v = code_page->beg_addr_v;
page->beg_addr_p = code_page->beg_addr_p;
code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
code_page->code_area_offset);
page->micro_words_num = code_area->micro_words_num;
uword_block_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_area->uword_block_tab);
page->uwblock_num = uword_block_tab->entry_num;
uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
sizeof(struct icp_qat_uof_objtable));
page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
for (i = 0; i < uword_block_tab->entry_num; i++)
page->uwblock[i].micro_words =
(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
struct icp_qat_uclo_encapme *ae_uimage,
int max_image)
{
int i, j;
struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
struct icp_qat_uof_image *image;
struct icp_qat_uof_objtable *ae_regtab;
struct icp_qat_uof_objtable *init_reg_sym_tab;
struct icp_qat_uof_objtable *sbreak_tab;
struct icp_qat_uof_encap_obj *encap_uof_obj =
&obj_handle->encap_uof_obj;
for (j = 0; j < max_image; j++) {
chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
ICP_QAT_UOF_IMAG, chunk_hdr);
if (!chunk_hdr)
break;
image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
chunk_hdr->offset);
ae_regtab = (struct icp_qat_uof_objtable *)
(image->reg_tab_offset +
obj_handle->obj_hdr->file_buff);
ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
(((char *)ae_regtab) +
sizeof(struct icp_qat_uof_objtable));
init_reg_sym_tab = (struct icp_qat_uof_objtable *)
(image->init_reg_sym_tab +
obj_handle->obj_hdr->file_buff);
ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
(((char *)init_reg_sym_tab) +
sizeof(struct icp_qat_uof_objtable));
sbreak_tab = (struct icp_qat_uof_objtable *)
(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
(((char *)sbreak_tab) +
sizeof(struct icp_qat_uof_objtable));
ae_uimage[j].img_ptr = image;
if (qat_uclo_check_image_compat(encap_uof_obj, image))
goto out_err;
ae_uimage[j].page =
kzalloc(sizeof(struct icp_qat_uclo_encap_page),
GFP_KERNEL);
if (!ae_uimage[j].page)
goto out_err;
qat_uclo_map_image_page(encap_uof_obj, image,
ae_uimage[j].page);
}
return j;
out_err:
for (i = 0; i < j; i++)
kfree(ae_uimage[i].page);
return 0;
}
static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
int i, ae;
int mflag = 0;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned long cfg_ae_mask = handle->cfg_ae_mask;
for_each_set_bit(ae, &ae_mask, max_ae) {
if (!test_bit(ae, &cfg_ae_mask))
continue;
for (i = 0; i < obj_handle->uimage_num; i++) {
unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
if (!test_bit(ae, &ae_assigned))
continue;
mflag = 1;
if (qat_uclo_init_ae_data(obj_handle, ae, i))
return -EINVAL;
}
}
if (!mflag) {
pr_err("QAT: uimage uses AE not set\n");
return -EINVAL;
}
return 0;
}
static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
char *tab_name, struct icp_qat_uof_strtable *str_table)
{
struct icp_qat_uof_chunkhdr *chunk_hdr;
chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
obj_hdr->file_buff, tab_name, NULL);
if (chunk_hdr) {
int hdr_size;
memcpy(&str_table->table_len, obj_hdr->file_buff +
chunk_hdr->offset, sizeof(str_table->table_len));
hdr_size = (char *)&str_table->strings - (char *)str_table;
str_table->strings = (uintptr_t)obj_hdr->file_buff +
chunk_hdr->offset + hdr_size;
return str_table;
}
return NULL;
}
static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
struct icp_qat_uof_chunkhdr *chunk_hdr;
chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
ICP_QAT_UOF_IMEM, NULL);
if (chunk_hdr) {
memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
chunk_hdr->offset, sizeof(unsigned int));
init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
(encap_uof_obj->beg_uof + chunk_hdr->offset +
sizeof(unsigned int));
}
}
static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
switch (handle->pci_dev->device) {
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
return ICP_QAT_AC_895XCC_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C62X:
return ICP_QAT_AC_C62X_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
handle->pci_dev->device);
return 0;
}
}
static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
unsigned int maj_ver, prod_type = obj_handle->prod_type;
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
prod_type);
return -EINVAL;
}
maj_ver = obj_handle->prod_rev & 0xff;
if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
return -EINVAL;
}
return 0;
}
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx_mask,
enum icp_qat_uof_regtype reg_type,
unsigned short reg_addr, unsigned int value)
{
switch (reg_type) {
case ICP_GPA_ABS:
case ICP_GPB_ABS:
ctx_mask = 0;
fallthrough;
case ICP_GPA_REL:
case ICP_GPB_REL:
return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
reg_addr, value);
case ICP_SR_ABS:
case ICP_DR_ABS:
case ICP_SR_RD_ABS:
case ICP_DR_RD_ABS:
ctx_mask = 0;
fallthrough;
case ICP_SR_REL:
case ICP_DR_REL:
case ICP_SR_RD_REL:
case ICP_DR_RD_REL:
return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
reg_addr, value);
case ICP_SR_WR_ABS:
case ICP_DR_WR_ABS:
ctx_mask = 0;
fallthrough;
case ICP_SR_WR_REL:
case ICP_DR_WR_REL:
return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
reg_addr, value);
case ICP_NEIGH_REL:
return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
default:
pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
return -EFAULT;
}
return 0;
}
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
unsigned int ae,
struct icp_qat_uclo_encapme *encap_ae)
{
unsigned int i;
unsigned char ctx_mask;
struct icp_qat_uof_init_regsym *init_regsym;
if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
ICP_QAT_UCLO_MAX_CTX)
ctx_mask = 0xff;
else
ctx_mask = 0x55;
for (i = 0; i < encap_ae->init_regsym_num; i++) {
unsigned int exp_res;
init_regsym = &encap_ae->init_regsym[i];
exp_res = init_regsym->value;
switch (init_regsym->init_type) {
case ICP_QAT_UOF_INIT_REG:
qat_uclo_init_reg(handle, ae, ctx_mask,
(enum icp_qat_uof_regtype)
init_regsym->reg_type,
(unsigned short)init_regsym->reg_addr,
exp_res);
break;
case ICP_QAT_UOF_INIT_REG_CTX:
/* check if ctx is appropriate for the ctxMode */
if (!((1 << init_regsym->ctx) & ctx_mask)) {
pr_err("QAT: invalid ctx num = 0x%x\n",
init_regsym->ctx);
return -EINVAL;
}
qat_uclo_init_reg(handle, ae,
(unsigned char)
(1 << init_regsym->ctx),
(enum icp_qat_uof_regtype)
init_regsym->reg_type,
(unsigned short)init_regsym->reg_addr,
exp_res);
break;
case ICP_QAT_UOF_INIT_EXPR:
pr_err("QAT: INIT_EXPR feature not supported\n");
return -EINVAL;
case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
return -EINVAL;
default:
break;
}
}
return 0;
}
static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned long ae_mask = handle->hal_handle->ae_mask;
struct icp_qat_uclo_aedata *aed;
unsigned int s, ae;
if (obj_handle->global_inited)
return 0;
if (obj_handle->init_mem_tab.entry_num) {
if (qat_uclo_init_memory(handle)) {
pr_err("QAT: initialize memory failed\n");
return -EINVAL;
}
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
aed = &obj_handle->ae_data[ae];
for (s = 0; s < aed->slice_num; s++) {
if (!aed->ae_slices[s].encap_image)
continue;
if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
return -EINVAL;
}
}
obj_handle->global_inited = 1;
return 0;
}
static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uclo_objhandle *obj_handle,
unsigned char ae,
struct icp_qat_uof_image *uof_image)
{
unsigned char mode;
int ret;
mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
if (ret) {
pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
return ret;
}
if (handle->chip_info->nn) {
mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
if (ret) {
pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
return ret;
}
}
mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
if (ret) {
pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
if (ret) {
pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
return ret;
}
if (handle->chip_info->lm2lm3) {
mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
if (ret) {
pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
if (ret) {
pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
return ret;
}
mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
qat_hal_set_ae_tindex_mode(handle, ae, mode);
}
return 0;
}
static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uof_image *uof_image;
struct icp_qat_uclo_aedata *ae_data;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned long cfg_ae_mask = handle->cfg_ae_mask;
unsigned char ae, s;
int error;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (!test_bit(ae, &cfg_ae_mask))
continue;
ae_data = &obj_handle->ae_data[ae];
for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
ICP_QAT_UCLO_MAX_CTX); s++) {
if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
continue;
uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
error = qat_hal_set_modes(handle, obj_handle, ae,
uof_image);
if (error)
return error;
}
}
return 0;
}
static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
struct icp_qat_uclo_encapme *image;
int a;
for (a = 0; a < obj_handle->uimage_num; a++) {
image = &obj_handle->ae_uimage[a];
image->uwords_num = image->page->beg_addr_p +
image->page->micro_words_num;
}
}
static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int ae;
obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
obj_handle->obj_hdr->file_buff;
obj_handle->uword_in_bytes = 6;
obj_handle->prod_type = qat_uclo_get_dev_type(handle);
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
pr_err("QAT: UOF incompatible\n");
return -EINVAL;
}
obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
GFP_KERNEL);
if (!obj_handle->uword_buf)
return -ENOMEM;
obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
if (!obj_handle->obj_hdr->file_buff ||
!qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
&obj_handle->str_table)) {
pr_err("QAT: UOF doesn't have effective images\n");
goto out_err;
}
obj_handle->uimage_num =
qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
if (!obj_handle->uimage_num)
goto out_err;
if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
pr_err("QAT: Bad object\n");
goto out_check_uof_aemask_err;
}
qat_uclo_init_uword_num(handle);
qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
&obj_handle->init_mem_tab);
if (qat_uclo_set_ae_mode(handle))
goto out_check_uof_aemask_err;
return 0;
out_check_uof_aemask_err:
for (ae = 0; ae < obj_handle->uimage_num; ae++)
kfree(obj_handle->ae_uimage[ae].page);
out_err:
kfree(obj_handle->uword_buf);
return -EFAULT;
}
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
{
unsigned int check_sum = 0;
unsigned int min_ver_offset = 0;
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
suof_handle->file_id = ICP_QAT_SUOF_FID;
suof_handle->suof_buf = (char *)suof_ptr;
suof_handle->suof_size = suof_size;
min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
min_ver);
check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
min_ver_offset);
if (check_sum != suof_ptr->check_sum) {
pr_err("QAT: incorrect SUOF checksum\n");
return -EINVAL;
}
suof_handle->check_sum = suof_ptr->check_sum;
suof_handle->min_ver = suof_ptr->min_ver;
suof_handle->maj_ver = suof_ptr->maj_ver;
suof_handle->fw_type = suof_ptr->fw_type;
return 0;
}
static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr *suof_img_hdr,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
struct icp_qat_simg_ae_mode *ae_mode;
struct icp_qat_suof_objhdr *suof_objhdr;
suof_img_hdr->simg_buf = (suof_handle->suof_buf +
suof_chunk_hdr->offset +
sizeof(*suof_objhdr));
suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
(suof_handle->suof_buf +
suof_chunk_hdr->offset))->img_length;
suof_img_hdr->css_header = suof_img_hdr->simg_buf;
suof_img_hdr->css_key = (suof_img_hdr->css_header +
sizeof(struct icp_qat_css_hdr));
suof_img_hdr->css_signature = suof_img_hdr->css_key +
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
suof_img_hdr->css_simg = suof_img_hdr->css_signature +
ICP_QAT_CSS_SIGNATURE_LEN(handle);
ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
suof_img_hdr->ae_mask = ae_mode->ae_mask;
suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
suof_img_hdr->fw_type = ae_mode->fw_type;
}
static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
char **sym_str = (char **)&suof_handle->sym_str;
unsigned int *sym_size = &suof_handle->sym_size;
struct icp_qat_suof_strtable *str_table_obj;
*sym_size = *(unsigned int *)(uintptr_t)
(suof_chunk_hdr->offset + suof_handle->suof_buf);
*sym_str = (char *)(uintptr_t)
(suof_handle->suof_buf + suof_chunk_hdr->offset +
sizeof(str_table_obj->tab_length));
}
static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr *img_hdr)
{
struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
unsigned int prod_rev, maj_ver, prod_type;
prod_type = qat_uclo_get_dev_type(handle);
img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (img_ae_mode->dev_type != prod_type) {
pr_err("QAT: incompatible product type %x\n",
img_ae_mode->dev_type);
return -EINVAL;
}
maj_ver = prod_rev & 0xff;
if (maj_ver > img_ae_mode->devmax_ver ||
maj_ver < img_ae_mode->devmin_ver) {
pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
return -EINVAL;
}
return 0;
}
static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
kfree(sobj_handle->img_table.simg_hdr);
sobj_handle->img_table.simg_hdr = NULL;
kfree(handle->sobj_handle);
handle->sobj_handle = NULL;
}
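/*
 * Swap the image at img_id with the last entry of the SIMG table. The
 * caller uses this to move the image that covers AE0 to the end, presumably
 * so it is loaded last on parts without target-group shared ustore.
 */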
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
unsigned int img_id, unsigned int num_simgs)
{
struct icp_qat_suof_img_hdr img_header;
if (img_id != num_simgs - 1) {
memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
sizeof(*suof_img_hdr));
memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
sizeof(*suof_img_hdr));
memcpy(&suof_img_hdr[img_id], &img_header,
sizeof(*suof_img_hdr));
}
}
static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
unsigned int i = 0;
struct icp_qat_suof_img_hdr img_header;
if (!suof_ptr || suof_size == 0) {
pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
return -EINVAL;
}
if (qat_uclo_check_suof_format(suof_ptr))
return -EINVAL;
ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
if (ret)
return ret;
suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
((uintptr_t)suof_ptr + sizeof(*suof_ptr));
qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
if (suof_handle->img_table.num_simgs != 0) {
suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
sizeof(img_header),
GFP_KERNEL);
if (!suof_img_hdr)
return -ENOMEM;
suof_handle->img_table.simg_hdr = suof_img_hdr;
for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
qat_uclo_map_simg(handle, &suof_img_hdr[i],
&suof_chunk_hdr[1 + i]);
ret = qat_uclo_check_simg_compat(handle,
&suof_img_hdr[i]);
if (ret)
return ret;
suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
ae0_img = i;
}
if (!handle->chip_info->tgroup_share_ustore) {
qat_uclo_tail_img(suof_img_hdr, ae0_img,
suof_handle->img_table.num_simgs);
}
}
return 0;
}
#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
#define BITS_IN_DWORD 32
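/*
 * Hand an authentication descriptor to the FCU: program its bus address
 * (CSS header minus the icp_qat_auth_chunk prefix) into the FCU DRAM
 * address CSRs, issue FCU_CTRL_CMD_AUTH and poll the status CSR until
 * verification is done, fails, or FW_AUTH_MAX_RETRY polls have elapsed.
 */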
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
u32 fcu_sts, retry = 0;
u32 fcu_ctl_csr, fcu_sts_csr;
u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
u64 bus_addr;
bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
- sizeof(struct icp_qat_auth_chunk);
fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
goto auth_fail;
if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
return 0;
} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
fcu_sts & FCU_AUTH_STS_MASK, retry);
return -EINVAL;
}
static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
int imgid)
{
struct icp_qat_suof_handle *sobj_handle;
if (!handle->chip_info->tgroup_share_ustore)
return false;
sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
if (handle->hal_handle->admin_ae_mask &
sobj_handle->img_table.simg_hdr[imgid].ae_mask)
return false;
return true;
}
static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned long desc_ae_mask = desc->ae_mask;
u32 fcu_sts, ae_broadcast_mask = 0;
u32 fcu_loaded_csr, ae_loaded;
u32 fcu_sts_csr, fcu_ctl_csr;
unsigned int ae, retry = 0;
if (handle->chip_info->tgroup_share_ustore) {
fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
} else {
pr_err("Chip 0x%x doesn't support broadcast load\n",
handle->pci_dev->device);
return -EINVAL;
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
return -EINVAL;
}
if (test_bit(ae, &desc_ae_mask))
ae_broadcast_mask |= 1 << ae;
}
if (ae_broadcast_mask) {
SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
ae_broadcast_mask);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
fcu_sts &= FCU_AUTH_STS_MASK;
if (fcu_sts == FCU_STS_LOAD_FAIL) {
pr_err("Broadcast load failed: 0x%x)\n", fcu_sts);
return -EINVAL;
} else if (fcu_sts == FCU_STS_LOAD_DONE) {
ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
break;
}
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
pr_err("QAT: broadcast load failed timeout %d\n", retry);
return -EINVAL;
}
}
return 0;
}
static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
struct icp_firml_dram_desc *dram_desc,
unsigned int size)
{
void *vptr;
dma_addr_t ptr;
vptr = dma_alloc_coherent(&handle->pci_dev->dev,
size, &ptr, GFP_KERNEL);
if (!vptr)
return -ENOMEM;
dram_desc->dram_base_addr_v = vptr;
dram_desc->dram_bus_addr = ptr;
dram_desc->dram_size = size;
return 0;
}
static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
struct icp_firml_dram_desc *dram_desc)
{
if (handle && dram_desc && dram_desc->dram_base_addr_v) {
dma_free_coherent(&handle->pci_dev->dev,
(size_t)(dram_desc->dram_size),
dram_desc->dram_base_addr_v,
dram_desc->dram_bus_addr);
}
if (dram_desc)
memset(dram_desc, 0, sizeof(*dram_desc));
}
static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc **desc)
{
struct icp_firml_dram_desc dram_desc;
if (*desc) {
dram_desc.dram_base_addr_v = *desc;
dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
(*desc))->chunk_bus_addr;
dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
(*desc))->chunk_size;
qat_uclo_simg_free(handle, &dram_desc);
}
}
static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
unsigned int fw_type)
{
char *fw_type_name = fw_type ? "MMP" : "AE";
unsigned int css_dword_size = sizeof(u32);
if (handle->chip_info->fw_auth) {
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
if ((css_hdr->header_len * css_dword_size) != header_len)
goto err;
if ((css_hdr->size * css_dword_size) != size)
goto err;
if (fw_type != css_hdr->fw_type)
goto err;
if (size <= header_len)
goto err;
size -= header_len;
}
if (fw_type == CSS_AE_FIRMWARE) {
if (size < sizeof(struct icp_qat_simg_ae_mode *) +
ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
goto err;
if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
goto err;
} else if (fw_type == CSS_MMP_FIRMWARE) {
if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
goto err;
} else {
pr_err("QAT: Unsupported firmware type\n");
return -EINVAL;
}
return 0;
err:
pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
return -EINVAL;
}
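/*
 * Copy a signed image into a DMA-coherent buffer laid out for FCU
 * authentication: CSS header, FW signing public key (modulus, zero padding,
 * exponent), signature, then the image body. The descriptor returned in
 * *desc records the bus address of each part; for AE firmware the AE mode
 * data, init sequence and instruction offsets are filled in as well.
 */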
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
struct icp_qat_fw_auth_desc **desc)
{
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
struct icp_qat_fw_auth_desc *auth_desc;
struct icp_qat_auth_chunk *auth_chunk;
u64 virt_addr, bus_addr, virt_base;
unsigned int length, simg_offset = sizeof(*auth_chunk);
struct icp_qat_simg_ae_mode *simg_ae_mode;
struct icp_firml_dram_desc img_desc;
if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return -EINVAL;
}
length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
pr_err("QAT: error, allocate continuous dram fail\n");
return -ENOMEM;
}
auth_chunk = img_desc.dram_base_addr_v;
auth_chunk->chunk_size = img_desc.dram_size;
auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
bus_addr = img_desc.dram_bus_addr + simg_offset;
auth_desc = img_desc.dram_base_addr_v;
auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->css_hdr_low = (unsigned int)bus_addr;
virt_addr = virt_base;
memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
/* pub key */
bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + sizeof(*css_hdr)),
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
/* padding */
memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
/* exponent */
memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
(void *)(image + sizeof(*css_hdr) +
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->signature_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + sizeof(*css_hdr) +
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
ICP_QAT_CSS_SIGNATURE_LEN(handle));
bus_addr = ADD_ADDR(auth_desc->signature_high,
auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->img_low = (unsigned int)bus_addr;
auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
CSS_AE_FIRMWARE) {
auth_desc->img_ae_mode_data_high = auth_desc->img_high;
auth_desc->img_ae_mode_data_low = auth_desc->img_low;
bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
auth_desc->img_ae_init_data_high = (unsigned int)
(bus_addr >> BITS_IN_DWORD);
auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
auth_desc->img_ae_insts_high = (unsigned int)
(bus_addr >> BITS_IN_DWORD);
auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
virt_addr += sizeof(struct icp_qat_css_hdr);
virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
} else {
auth_desc->img_ae_insts_high = auth_desc->img_high;
auth_desc->img_ae_insts_low = auth_desc->img_low;
}
*desc = auth_desc;
return 0;
}
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
unsigned long ae_mask = handle->hal_handle->ae_mask;
u32 fcu_sts_csr, fcu_ctl_csr;
u32 loaded_aes, loaded_csr;
unsigned int i;
u32 fcu_sts;
fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
int retry = 0;
if (!((desc->ae_mask >> i) & 0x1))
continue;
if (qat_hal_check_ae_active(handle, i)) {
pr_err("QAT: AE %d is active\n", i);
return -EINVAL;
}
SET_CAP_CSR(handle, fcu_ctl_csr,
(FCU_CTRL_CMD_LOAD |
(1 << FCU_CTRL_BROADCAST_POS) |
(i << FCU_CTRL_AE_POS)));
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
if ((fcu_sts & FCU_AUTH_STS_MASK) ==
FCU_STS_LOAD_DONE) {
loaded_aes = GET_CAP_CSR(handle, loaded_csr);
loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
if (loaded_aes & (1 << i))
break;
}
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
pr_err("QAT: firmware load failed timeout %x\n", retry);
return -EINVAL;
}
}
return 0;
}
static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
struct icp_qat_suof_handle *suof_handle;
suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
if (!suof_handle)
return -ENOMEM;
handle->sobj_handle = suof_handle;
if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
qat_uclo_del_suof(handle);
pr_err("QAT: map SUOF failed\n");
return -EINVAL;
}
return 0;
}
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
struct icp_qat_fw_auth_desc *desc = NULL;
int status = 0;
int ret;
ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
if (ret)
return ret;
if (handle->chip_info->fw_auth) {
status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
if (!status)
status = qat_uclo_auth_fw(handle, desc);
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
if (handle->chip_info->mmp_sram_size < mem_size) {
pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
}
return status;
}
static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
struct icp_qat_uof_filehdr *filehdr;
struct icp_qat_uclo_objhandle *objhdl;
objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
if (!objhdl)
return -ENOMEM;
objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
if (!objhdl->obj_buf)
goto out_objbuf_err;
filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
if (qat_uclo_check_uof_format(filehdr))
goto out_objhdr_err;
objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
ICP_QAT_UOF_OBJS);
if (!objhdl->obj_hdr) {
pr_err("QAT: object file chunk is null\n");
goto out_objhdr_err;
}
handle->obj_handle = objhdl;
if (qat_uclo_parse_uof_obj(handle))
goto out_overlay_obj_err;
return 0;
out_overlay_obj_err:
handle->obj_handle = NULL;
kfree(objhdl->obj_hdr);
out_objhdr_err:
kfree(objhdl->obj_buf);
out_objbuf_err:
kfree(objhdl);
return -ENOMEM;
}
static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_mof_file_hdr *mof_ptr,
u32 mof_size)
{
struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
unsigned int min_ver_offset;
unsigned int checksum;
mobj_handle->file_id = ICP_QAT_MOF_FID;
mobj_handle->mof_buf = (char *)mof_ptr;
mobj_handle->mof_size = mof_size;
min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
min_ver);
checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
min_ver_offset);
if (checksum != mof_ptr->checksum) {
pr_err("QAT: incorrect MOF checksum\n");
return -EINVAL;
}
mobj_handle->checksum = mof_ptr->checksum;
mobj_handle->min_ver = mof_ptr->min_ver;
mobj_handle->maj_ver = mof_ptr->maj_ver;
return 0;
}
static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
kfree(mobj_handle->obj_table.obj_hdr);
mobj_handle->obj_table.obj_hdr = NULL;
kfree(handle->mobj_handle);
handle->mobj_handle = NULL;
}
static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
const char *obj_name, char **obj_ptr,
unsigned int *obj_size)
{
struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
unsigned int i;
for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
if (!strncmp(obj_hdr[i].obj_name, obj_name,
ICP_QAT_SUOF_OBJ_NAME_LEN)) {
*obj_ptr = obj_hdr[i].obj_buf;
*obj_size = obj_hdr[i].obj_size;
return 0;
}
}
pr_err("QAT: object %s is not found inside MOF\n", obj_name);
return -EINVAL;
}
static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
struct icp_qat_mof_objhdr *mobj_hdr,
struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
{
u8 *obj;
if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
} else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
} else {
pr_err("QAT: unsupported chunk id\n");
return -EINVAL;
}
mobj_hdr->obj_buf = obj;
mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
return 0;
}
static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
{
struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
struct icp_qat_mof_obj_hdr *uobj_hdr;
struct icp_qat_mof_obj_hdr *sobj_hdr;
struct icp_qat_mof_objhdr *mobj_hdr;
unsigned int uobj_chunk_num = 0;
unsigned int sobj_chunk_num = 0;
unsigned int *valid_chunk;
int ret, i;
uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
if (uobj_hdr)
uobj_chunk_num = uobj_hdr->num_chunks;
if (sobj_hdr)
sobj_chunk_num = sobj_hdr->num_chunks;
mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
sizeof(*mobj_hdr), GFP_KERNEL);
if (!mobj_hdr)
return -ENOMEM;
mobj_handle->obj_table.obj_hdr = mobj_hdr;
valid_chunk = &mobj_handle->obj_table.num_objs;
uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
/* map uof objects */
for (i = 0; i < uobj_chunk_num; i++) {
ret = qat_uclo_map_obj_from_mof(mobj_handle,
&mobj_hdr[*valid_chunk],
&uobj_chunkhdr[i]);
if (ret)
return ret;
(*valid_chunk)++;
}
/* map suof objects */
for (i = 0; i < sobj_chunk_num; i++) {
ret = qat_uclo_map_obj_from_mof(mobj_handle,
&mobj_hdr[*valid_chunk],
&sobj_chunkhdr[i]);
if (ret)
return ret;
(*valid_chunk)++;
}
if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
return -EINVAL;
}
return 0;
}
static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
char **sym_str = (char **)&mobj_handle->sym_str;
unsigned int *sym_size = &mobj_handle->sym_size;
struct icp_qat_mof_str_table *str_table_obj;
*sym_size = *(unsigned int *)(uintptr_t)
(mof_chunkhdr->offset + mobj_handle->mof_buf);
*sym_str = (char *)(uintptr_t)
(mobj_handle->mof_buf + mof_chunkhdr->offset +
sizeof(str_table_obj->tab_len));
}
static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
char *chunk_id = mof_chunkhdr->chunk_id;
if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
mof_chunkhdr->offset;
else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
mof_chunkhdr->offset;
}
static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
{
int maj = mof_hdr->maj_ver & 0xff;
int min = mof_hdr->min_ver & 0xff;
if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
return -EINVAL;
}
if (mof_hdr->num_chunks <= 0x1) {
pr_err("QAT: MOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
maj, min);
return -EINVAL;
}
return 0;
}
static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_mof_file_hdr *mof_ptr,
u32 mof_size, const char *obj_name,
char **obj_ptr, unsigned int *obj_size)
{
struct icp_qat_mof_chunkhdr *mof_chunkhdr;
unsigned int file_id = mof_ptr->file_id;
struct icp_qat_mof_handle *mobj_handle;
unsigned short chunks_num;
unsigned int i;
int ret;
if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
if (obj_ptr)
*obj_ptr = (char *)mof_ptr;
if (obj_size)
*obj_size = mof_size;
return 0;
}
if (qat_uclo_check_mof_format(mof_ptr))
return -EINVAL;
mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
if (!mobj_handle)
return -ENOMEM;
handle->mobj_handle = mobj_handle;
ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
if (ret)
return ret;
mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
chunks_num = mof_ptr->num_chunks;
/* Parse MOF file chunks */
for (i = 0; i < chunks_num; i++)
qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
	/* All sym_objs, uobjs and sobjs should be available */
if (!mobj_handle->sym_str ||
(!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
return -EINVAL;
ret = qat_uclo_map_objs_from_mof(mobj_handle);
if (ret)
return ret;
/* Seek specified uof object in MOF */
return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
obj_ptr, obj_size);
}
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, u32 mem_size, const char *obj_name)
{
char *obj_addr;
u32 obj_size;
int ret;
BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
(sizeof(handle->hal_handle->ae_mask) * 8));
if (!handle || !addr_ptr || mem_size < 24)
return -EINVAL;
if (obj_name) {
ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
&obj_addr, &obj_size);
if (ret)
return ret;
} else {
obj_addr = addr_ptr;
obj_size = mem_size;
}
return (handle->chip_info->fw_auth) ?
qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
}
void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int a;
if (handle->mobj_handle)
qat_uclo_del_mof(handle);
if (handle->sobj_handle)
qat_uclo_del_suof(handle);
if (!obj_handle)
return;
kfree(obj_handle->uword_buf);
for (a = 0; a < obj_handle->uimage_num; a++)
kfree(obj_handle->ae_uimage[a].page);
for (a = 0; a < handle->hal_handle->ae_max_num; a++)
qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
kfree(obj_handle->obj_hdr);
kfree(obj_handle->obj_buf);
kfree(obj_handle);
handle->obj_handle = NULL;
}
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
struct icp_qat_uclo_encap_page *encap_page,
u64 *uword, unsigned int addr_p,
unsigned int raddr, u64 fill)
{
unsigned int i, addr;
u64 uwrd = 0;
if (!encap_page) {
*uword = fill;
return;
}
addr = (encap_page->page_region) ? raddr : addr_p;
for (i = 0; i < encap_page->uwblock_num; i++) {
if (addr >= encap_page->uwblock[i].start_addr &&
addr <= encap_page->uwblock[i].start_addr +
encap_page->uwblock[i].words_num - 1) {
addr -= encap_page->uwblock[i].start_addr;
addr *= obj_handle->uword_in_bytes;
memcpy(&uwrd, (void *)(((uintptr_t)
encap_page->uwblock[i].micro_words) + addr),
obj_handle->uword_in_bytes);
uwrd = uwrd & GENMASK_ULL(43, 0);
}
}
*uword = uwrd;
if (*uword == INVLD_UWORD)
*uword = fill;
}
static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uclo_encap_page
*encap_page, unsigned int ae)
{
unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
u64 fill_pat;
/* load the page starting at appropriate ustore address */
/* get fill-pattern from an image -- they are all the same */
memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
sizeof(u64));
uw_physical_addr = encap_page->beg_addr_p;
uw_relative_addr = 0;
words_num = encap_page->micro_words_num;
while (words_num) {
cpylen = min(words_num, UWORD_CPYBUF_SIZE);
/* load the buffer */
for (i = 0; i < cpylen; i++)
qat_uclo_fill_uwords(obj_handle, encap_page,
&obj_handle->uword_buf[i],
uw_physical_addr + i,
uw_relative_addr + i, fill_pat);
/* copy the buffer to ustore */
qat_hal_wr_uwords(handle, (unsigned char)ae,
uw_physical_addr, cpylen,
obj_handle->uword_buf);
uw_physical_addr += cpylen;
uw_relative_addr += cpylen;
words_num -= cpylen;
}
}
static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_image *image)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned long ae_mask = handle->hal_handle->ae_mask;
unsigned long cfg_ae_mask = handle->cfg_ae_mask;
unsigned long ae_assigned = image->ae_assigned;
struct icp_qat_uclo_aedata *aed;
unsigned int ctx_mask, s;
struct icp_qat_uclo_page *page;
unsigned char ae;
int ctx;
if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
ctx_mask = 0xff;
else
ctx_mask = 0x55;
	/*
	 * Load the default page and set the assigned CTX PC
	 * to the entrypoint address.
	 */
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (!test_bit(ae, &cfg_ae_mask))
continue;
if (!test_bit(ae, &ae_assigned))
continue;
aed = &obj_handle->ae_data[ae];
/* find the slice to which this image is assigned */
for (s = 0; s < aed->slice_num; s++) {
if (image->ctx_assigned &
aed->ae_slices[s].ctx_mask_assigned)
break;
}
if (s >= aed->slice_num)
continue;
page = aed->ae_slices[s].page;
if (!page->encap_page->def_page)
continue;
qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
page = aed->ae_slices[s].page;
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
aed->ae_slices[s].cur_page[ctx] =
(ctx_mask & (1 << ctx)) ? page : NULL;
qat_hal_set_live_ctx(handle, (unsigned char)ae,
image->ctx_assigned);
qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
image->entry_address);
}
}
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
unsigned int i;
struct icp_qat_fw_auth_desc *desc = NULL;
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
int ret;
for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
simg_hdr[i].simg_len,
CSS_AE_FIRMWARE);
if (ret)
return ret;
if (qat_uclo_map_auth_fw(handle,
(char *)simg_hdr[i].simg_buf,
(unsigned int)
simg_hdr[i].simg_len,
&desc))
goto wr_err;
if (qat_uclo_auth_fw(handle, desc))
goto wr_err;
if (qat_uclo_is_broadcast(handle, i)) {
if (qat_uclo_broadcast_load_fw(handle, desc))
goto wr_err;
} else {
if (qat_uclo_load_fw(handle, desc))
goto wr_err;
}
qat_uclo_ummap_auth_fw(handle, &desc);
}
return 0;
wr_err:
qat_uclo_ummap_auth_fw(handle, &desc);
return -EINVAL;
}
static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int i;
if (qat_uclo_init_globals(handle))
return -EINVAL;
for (i = 0; i < obj_handle->uimage_num; i++) {
if (!obj_handle->ae_uimage[i].img_ptr)
return -EINVAL;
if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
return -EINVAL;
qat_uclo_wr_uimage_page(handle,
obj_handle->ae_uimage[i].img_ptr);
}
return 0;
}
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
qat_uclo_wr_uof_img(handle);
}
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask)
{
if (!cfg_ae_mask)
return -EINVAL;
handle->cfg_ae_mask = cfg_ae_mask;
return 0;
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_uclo.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/units.h>
#include <asm/errno.h>
#include "adf_accel_devices.h"
#include "adf_clock.h"
#include "adf_common_drv.h"
#define MEASURE_CLOCK_RETRIES 10
#define MEASURE_CLOCK_DELAY_US 10000
#define ME_CLK_DIVIDER 16
#define MEASURE_CLOCK_DELTA_THRESHOLD_US 100
static inline u64 timespec_to_us(const struct timespec64 *ts)
{
return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_USEC);
}
static inline u64 timespec_to_ms(const struct timespec64 *ts)
{
return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_MSEC);
}
u64 adf_clock_get_current_time(void)
{
struct timespec64 ts;
ktime_get_real_ts64(&ts);
return timespec_to_ms(&ts);
}
static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
{
struct timespec64 ts1, ts2, ts3, ts4;
u64 timestamp1, timestamp2, temp;
u32 delta_us, tries;
int ret;
tries = MEASURE_CLOCK_RETRIES;
do {
ktime_get_real_ts64(&ts1);
ret = adf_get_fw_timestamp(accel_dev, ×tamp1);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to get fw timestamp\n");
return ret;
}
ktime_get_real_ts64(&ts2);
delta_us = timespec_to_us(&ts2) - timespec_to_us(&ts1);
} while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
if (!tries) {
dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
return -ETIMEDOUT;
}
fsleep(MEASURE_CLOCK_DELAY_US);
tries = MEASURE_CLOCK_RETRIES;
do {
ktime_get_real_ts64(&ts3);
if (adf_get_fw_timestamp(accel_dev, ×tamp2)) {
dev_err(&GET_DEV(accel_dev),
"Failed to get fw timestamp\n");
return -EIO;
}
ktime_get_real_ts64(&ts4);
delta_us = timespec_to_us(&ts4) - timespec_to_us(&ts3);
} while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
if (!tries) {
dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
return -ETIMEDOUT;
}
delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
/*
	 * Enclose the division so the compiler can fold (HZ_PER_MHZ / 10) into
	 * a constant and avoid promoting the r-value to 64-bit before the
	 * division.
*/
*frequency = temp * (HZ_PER_MHZ / 10);
return 0;
}
/**
* adf_dev_measure_clock() - measures device clock frequency
* @accel_dev: Pointer to acceleration device.
* @frequency: Pointer to variable where result will be stored
* @min: Minimal allowed frequency value
* @max: Maximal allowed frequency value
*
 * If the measured frequency falls outside the min/max thresholds, the
 * returned value is clamped to the crossed threshold.
 *
 * The algorithm compares the device firmware timestamp with the kernel
 * timestamp, so only limited accuracy can be expected from this measurement.
 * An illustrative arithmetic sketch follows this function.
*
* Return:
 * * 0 - measurement succeeded
* * -ETIMEDOUT - measurement failed
*/
int adf_dev_measure_clock(struct adf_accel_dev *accel_dev,
u32 *frequency, u32 min, u32 max)
{
int ret;
u32 freq;
ret = measure_clock(accel_dev, &freq);
if (ret)
return ret;
*frequency = clamp(freq, min, max);
if (*frequency != freq)
dev_warn(&GET_DEV(accel_dev),
"Measured clock %d Hz is out of range, assuming %d\n",
freq, *frequency);
return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_measure_clock);
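/*
 * Illustrative sketch only: a hypothetical helper that is not used by the
 * driver. It spells out the arithmetic behind measure_clock(): the firmware
 * timestamp counter ticks at frequency / ME_CLK_DIVIDER, so the frequency in
 * Hz can be recovered from the tick delta observed over delta_us
 * microseconds (assuming the product below fits in 64 bits, which holds for
 * realistic measurement windows).
 */
static inline u64 adf_clock_example_ticks_to_hz(u64 ticks, u32 delta_us)
{
	/* freq [Hz] = ticks * ME_CLK_DIVIDER * USEC_PER_SEC / delta_us */
	return DIV_ROUND_CLOSEST_ULL(ticks * ME_CLK_DIVIDER * USEC_PER_SEC,
				     delta_us);
}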
| linux-master | drivers/crypto/intel/qat/qat_common/adf_clock.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_gen2_hw_data.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"
#define SEC ADF_KERNEL_SEC
static struct service_hndl qat_crypto;
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
atomic_dec(&inst->refctr);
adf_dev_put(inst->accel_dev);
}
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
struct qat_crypto_instance *inst, *tmp;
int i;
list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
for (i = 0; i < atomic_read(&inst->refctr); i++)
qat_crypto_put_instance(inst);
if (inst->sym_tx)
adf_remove_ring(inst->sym_tx);
if (inst->sym_rx)
adf_remove_ring(inst->sym_rx);
if (inst->pke_tx)
adf_remove_ring(inst->pke_tx);
if (inst->pke_rx)
adf_remove_ring(inst->pke_rx);
list_del(&inst->list);
kfree(inst);
}
return 0;
}
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
struct qat_crypto_instance *inst = NULL, *tmp_inst;
unsigned long best = ~0;
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
unsigned long ctr;
if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->crypto_list)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
accel_dev = tmp_dev;
best = ctr;
}
}
}
if (!accel_dev) {
pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
if (adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->crypto_list)) {
accel_dev = tmp_dev;
break;
}
}
}
if (!accel_dev)
return NULL;
best = ~0;
list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
unsigned long ctr;
ctr = atomic_read(&tmp_inst->refctr);
if (best > ctr) {
inst = tmp_inst;
best = ctr;
}
}
if (inst) {
if (adf_dev_get(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
return NULL;
}
atomic_inc(&inst->refctr);
}
return inst;
}
/**
 * qat_crypto_vf_dev_config() - create dev config required to create crypto inst.
*
* @accel_dev: Pointer to acceleration device.
*
* Function creates device configuration required to create
 * asym, sym or crypto instances.
*
* Return: 0 on success, error code otherwise.
*/
int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
{
u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
dev_err(&GET_DEV(accel_dev),
"Unsupported ring/service mapping present on PF");
return -EFAULT;
}
return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
}
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
unsigned long num_inst, num_msg_sym, num_msg_asym;
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
unsigned long sym_bank, asym_bank;
struct qat_crypto_instance *inst;
int msg_size;
int ret;
int i;
INIT_LIST_HEAD(&accel_dev->crypto_list);
ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
if (ret)
return ret;
ret = kstrtoul(val, 0, &num_inst);
if (ret)
return ret;
for (i = 0; i < num_inst; i++) {
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!inst) {
ret = -ENOMEM;
goto err;
}
list_add_tail(&inst->list, &accel_dev->crypto_list);
inst->id = i;
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
goto err;
ret = kstrtoul(val, 10, &sym_bank);
if (ret)
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
goto err;
ret = kstrtoul(val, 10, &asym_bank);
if (ret)
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
goto err;
ret = kstrtoul(val, 10, &num_msg_sym);
if (ret)
goto err;
num_msg_sym = num_msg_sym >> 1;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
goto err;
ret = kstrtoul(val, 10, &num_msg_asym);
if (ret)
goto err;
num_msg_asym = num_msg_asym >> 1;
msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
msg_size, key, NULL, 0, &inst->sym_tx);
if (ret)
goto err;
msg_size = msg_size >> 1;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
msg_size, key, NULL, 0, &inst->pke_tx);
if (ret)
goto err;
msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
msg_size, key, qat_alg_callback, 0,
&inst->sym_rx);
if (ret)
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
msg_size, key, qat_alg_asym_callback, 0,
&inst->pke_rx);
if (ret)
goto err;
INIT_LIST_HEAD(&inst->backlog.list);
spin_lock_init(&inst->backlog.lock);
}
return 0;
err:
qat_crypto_free_instances(accel_dev);
return ret;
}
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
if (qat_crypto_create_instances(accel_dev))
return -EFAULT;
return 0;
}
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
return qat_crypto_free_instances(accel_dev);
}
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
enum adf_event event)
{
int ret;
switch (event) {
case ADF_EVENT_INIT:
ret = qat_crypto_init(accel_dev);
break;
case ADF_EVENT_SHUTDOWN:
ret = qat_crypto_shutdown(accel_dev);
break;
case ADF_EVENT_RESTARTING:
case ADF_EVENT_RESTARTED:
case ADF_EVENT_START:
case ADF_EVENT_STOP:
default:
ret = 0;
}
return ret;
}
int qat_crypto_register(void)
{
memset(&qat_crypto, 0, sizeof(qat_crypto));
qat_crypto.event_hld = qat_crypto_event_handler;
qat_crypto.name = "qat_crypto";
return adf_service_register(&qat_crypto);
}
int qat_crypto_unregister(void)
{
return adf_service_unregister(&qat_crypto);
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_crypto.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
#define ADF_CONST_TABLE_SIZE 1024
#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_ONE_AE 1
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
struct adf_admin_comms {
dma_addr_t phy_addr;
dma_addr_t const_tbl_addr;
void *virt_addr;
void *virt_tbl_addr;
void __iomem *mailbox_addr;
struct mutex lock; /* protects adf_admin_comms struct */
};
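/*
 * Informal note on the mailbox handshake implemented below (derived from the
 * code, not from a hardware specification): each AE owns a slot of
 * 2 * ADF_ADMINMSG_LEN bytes in the shared DMA buffer. The host copies the
 * request into the first half of the slot, rings the per-AE mailbox CSR by
 * writing 1, then polls until the firmware clears it back to 0, at which
 * point the response can be read from the second half of the slot.
 */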
static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
void *in, void *out)
{
int ret;
u32 status;
struct adf_admin_comms *admin = accel_dev->admin;
int offset = ae * ADF_ADMINMSG_LEN * 2;
void __iomem *mailbox = admin->mailbox_addr;
int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
struct icp_qat_fw_init_admin_req *request = in;
mutex_lock(&admin->lock);
if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
mutex_unlock(&admin->lock);
return -EAGAIN;
}
memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
ADF_CSR_WR(mailbox, mb_offset, 1);
ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
ADF_ADMIN_POLL_DELAY_US,
ADF_ADMIN_POLL_TIMEOUT_US, true,
mailbox, mb_offset);
if (ret < 0) {
/* Response timeout */
dev_err(&GET_DEV(accel_dev),
"Failed to send admin msg %d to accelerator %d\n",
request->cmd_id, ae);
} else {
		/* Response to the admin message was received; copy the
		 * response data into the "out" parameter.
*/
memcpy(out, admin->virt_addr + offset +
ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
}
mutex_unlock(&admin->lock);
return ret;
}
static int adf_send_admin(struct adf_accel_dev *accel_dev,
struct icp_qat_fw_init_admin_req *req,
struct icp_qat_fw_init_admin_resp *resp,
const unsigned long ae_mask)
{
u32 ae;
for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
resp->status)
return -EFAULT;
return 0;
}
static int adf_init_ae(struct adf_accel_dev *accel_dev)
{
struct icp_qat_fw_init_admin_req req;
struct icp_qat_fw_init_admin_resp resp;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
u32 ae_mask = hw_device->ae_mask;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.cmd_id = ICP_QAT_FW_INIT_AE;
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
{
struct icp_qat_fw_init_admin_req req;
struct icp_qat_fw_init_admin_resp resp;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
{
struct icp_qat_fw_init_admin_req req = { };
struct icp_qat_fw_init_admin_resp resp;
unsigned int ae_mask = ADF_ONE_AE;
int ret;
req.cmd_id = ICP_QAT_FW_TIMER_GET;
ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
if (ret)
return ret;
*timestamp = resp.timestamp;
return 0;
}
static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
u32 *capabilities)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct icp_qat_fw_init_admin_resp resp;
struct icp_qat_fw_init_admin_req req;
unsigned long ae_mask;
unsigned long ae;
int ret;
/* Target only service accelerator engines */
ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
*capabilities = 0;
for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
if (ret)
return ret;
*capabilities |= resp.extended_features;
}
return 0;
}
int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps)
{
struct icp_qat_fw_init_admin_resp resp = { };
struct icp_qat_fw_init_admin_req req = { };
int ret;
req.cmd_id = ICP_QAT_FW_COUNTERS_GET;
ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
if (ret || resp.status)
return -EFAULT;
*reqs = resp.req_rec_count;
*resps = resp.resp_sent_count;
return 0;
}
int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
{
u32 ae_mask = accel_dev->hw_device->ae_mask;
struct icp_qat_fw_init_admin_req req = { };
struct icp_qat_fw_init_admin_resp resp = { };
req.cmd_id = ICP_QAT_FW_SYNC;
req.int_timer_ticks = cnt;
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
{
u32 ae_mask = accel_dev->hw_device->ae_mask;
struct icp_qat_fw_init_admin_req req = { };
struct icp_qat_fw_init_admin_resp resp;
req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr;
req.heartbeat_ticks = ticks;
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
*
* Function sends admin init message to the FW
*
* Return: 0 on success, error code otherwise.
*/
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
{
u32 dc_capabilities = 0;
int ret;
ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
if (ret) {
dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
return ret;
}
accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
ret = adf_set_fw_constants(accel_dev);
if (ret)
return ret;
return adf_init_ae(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
/**
* adf_init_admin_pm() - Function sends PM init message to FW
* @accel_dev: Pointer to acceleration device.
* @idle_delay: QAT HW idle time before power gating is initiated.
* 000 - 64us
* 001 - 128us
* 010 - 256us
* 011 - 512us
* 100 - 1ms
* 101 - 2ms
* 110 - 4ms
* 111 - 8ms
*
 * Function sends the PM state configuration admin message to the FW.
 * A small illustrative sketch of the idle_delay encoding follows this
 * function.
*
* Return: 0 on success, error code otherwise.
*/
int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct icp_qat_fw_init_admin_resp resp = {0};
struct icp_qat_fw_init_admin_req req = {0};
u32 ae_mask = hw_data->admin_ae_mask;
if (!accel_dev->admin) {
dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
return -EFAULT;
}
req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
req.idle_filter = idle_delay;
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
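/*
 * Illustrative sketch only: a hypothetical helper that is not used by the
 * driver. It captures the idle_delay encoding documented above, where the
 * 3-bit field selects 64us << idle_delay, i.e. 0 maps to 64us and 7 maps
 * to 8ms.
 */
static inline unsigned int adf_pm_example_idle_delay_to_us(u32 idle_delay)
{
	return 64U << (idle_delay & 0x7);
}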
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
struct admin_info admin_csrs_info;
u32 mailbox_offset, adminmsg_u, adminmsg_l;
void __iomem *mailbox;
u64 reg_val;
admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
&admin->phy_addr, GFP_KERNEL);
if (!admin->virt_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
kfree(admin);
return -ENOMEM;
}
admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
PAGE_SIZE,
&admin->const_tbl_addr,
GFP_KERNEL);
if (!admin->virt_tbl_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
admin->virt_addr, admin->phy_addr);
kfree(admin);
return -ENOMEM;
}
memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
hw_data->get_admin_info(&admin_csrs_info);
mailbox_offset = admin_csrs_info.mailbox_offset;
mailbox = pmisc_addr + mailbox_offset;
adminmsg_u = admin_csrs_info.admin_msg_ur;
adminmsg_l = admin_csrs_info.admin_msg_lr;
reg_val = (u64)admin->phy_addr;
ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
mutex_init(&admin->lock);
admin->mailbox_addr = mailbox;
accel_dev->admin = admin;
return 0;
}
EXPORT_SYMBOL_GPL(adf_init_admin_comms);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin = accel_dev->admin;
if (!admin)
return;
if (admin->virt_addr)
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
admin->virt_addr, admin->phy_addr);
if (admin->virt_tbl_addr)
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
admin->virt_tbl_addr, admin->const_tbl_addr);
mutex_destroy(&admin->lock);
kfree(admin);
accel_dev->admin = NULL;
}
EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_admin.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
#define ADF_CFG_MAX_SECTION 512
#define ADF_CFG_MAX_KEY_VAL 256
#define DEVICE_NAME "qat_adf_ctl"
static DEFINE_MUTEX(adf_ctl_lock);
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
static const struct file_operations adf_ctl_ops = {
.owner = THIS_MODULE,
.unlocked_ioctl = adf_ctl_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
struct adf_ctl_drv_info {
unsigned int major;
struct cdev drv_cdev;
struct class *drv_class;
};
static struct adf_ctl_drv_info adf_ctl_drv;
static void adf_chr_drv_destroy(void)
{
device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
cdev_del(&adf_ctl_drv.drv_cdev);
class_destroy(adf_ctl_drv.drv_class);
unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}
static int adf_chr_drv_create(void)
{
dev_t dev_id;
struct device *drv_device;
if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
pr_err("QAT: unable to allocate chrdev region\n");
return -EFAULT;
}
adf_ctl_drv.drv_class = class_create(DEVICE_NAME);
if (IS_ERR(adf_ctl_drv.drv_class)) {
pr_err("QAT: class_create failed for adf_ctl\n");
goto err_chrdev_unreg;
}
adf_ctl_drv.major = MAJOR(dev_id);
cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
pr_err("QAT: cdev add failed\n");
goto err_class_destr;
}
drv_device = device_create(adf_ctl_drv.drv_class, NULL,
MKDEV(adf_ctl_drv.major, 0),
NULL, DEVICE_NAME);
if (IS_ERR(drv_device)) {
pr_err("QAT: failed to create device\n");
goto err_cdev_del;
}
return 0;
err_cdev_del:
cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
class_destroy(adf_ctl_drv.drv_class);
err_chrdev_unreg:
unregister_chrdev_region(dev_id, 1);
return -EFAULT;
}
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
unsigned long arg)
{
struct adf_user_cfg_ctl_data *cfg_data;
cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
if (!cfg_data)
return -ENOMEM;
/* Initialize device id to NO DEVICE as 0 is a valid device id */
cfg_data->device_id = ADF_CFG_NO_DEVICE;
if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
pr_err("QAT: failed to copy from user cfg_data.\n");
kfree(cfg_data);
return -EIO;
}
*ctl_data = cfg_data;
return 0;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
const char *section,
const struct adf_user_cfg_key_val *key_val)
{
if (key_val->type == ADF_HEX) {
long *ptr = (long *)key_val->val;
long val = *ptr;
if (adf_cfg_add_key_value_param(accel_dev, section,
key_val->key, (void *)val,
key_val->type)) {
dev_err(&GET_DEV(accel_dev),
"failed to add hex keyvalue.\n");
return -EFAULT;
}
} else {
if (adf_cfg_add_key_value_param(accel_dev, section,
key_val->key, key_val->val,
key_val->type)) {
dev_err(&GET_DEV(accel_dev),
"failed to add keyvalue.\n");
return -EFAULT;
}
}
return 0;
}
static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
struct adf_user_cfg_ctl_data *ctl_data)
{
struct adf_user_cfg_key_val key_val;
struct adf_user_cfg_key_val *params_head;
struct adf_user_cfg_section section, *section_head;
int i, j;
section_head = ctl_data->config_section;
for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
if (copy_from_user(§ion, (void __user *)section_head,
sizeof(*section_head))) {
dev_err(&GET_DEV(accel_dev),
"failed to copy section info\n");
goto out_err;
}
if (adf_cfg_section_add(accel_dev, section.name)) {
dev_err(&GET_DEV(accel_dev),
"failed to add section.\n");
goto out_err;
}
params_head = section.params;
for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
if (copy_from_user(&key_val, (void __user *)params_head,
sizeof(key_val))) {
dev_err(&GET_DEV(accel_dev),
"Failed to copy keyvalue.\n");
goto out_err;
}
if (adf_add_key_value_data(accel_dev, section.name,
&key_val)) {
goto out_err;
}
params_head = key_val.next;
}
section_head = section.next;
}
return 0;
out_err:
adf_cfg_del_all(accel_dev);
return -EFAULT;
}
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
ret = adf_ctl_alloc_resources(&ctl_data, arg);
if (ret)
return ret;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
ret = -EFAULT;
goto out;
}
if (adf_dev_started(accel_dev)) {
ret = -EFAULT;
goto out;
}
if (adf_copy_key_value_data(accel_dev, ctl_data)) {
ret = -EFAULT;
goto out;
}
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
kfree(ctl_data);
return ret;
}
static int adf_ctl_is_device_in_use(int id)
{
struct adf_accel_dev *dev;
list_for_each_entry(dev, adf_devmgr_get_head(), list) {
if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
dev_info(&GET_DEV(dev),
"device qat_dev%d is busy\n",
dev->accel_id);
return -EBUSY;
}
}
}
return 0;
}
static void adf_ctl_stop_devices(u32 id)
{
struct adf_accel_dev *accel_dev;
list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (!adf_dev_started(accel_dev))
continue;
/* First stop all VFs */
if (!accel_dev->is_vf)
continue;
adf_dev_down(accel_dev, false);
}
}
list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (!adf_dev_started(accel_dev))
continue;
adf_dev_down(accel_dev, false);
}
}
}
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
ret = adf_ctl_alloc_resources(&ctl_data, arg);
if (ret)
return ret;
if (adf_devmgr_verify_id(ctl_data->device_id)) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
ret = -ENODEV;
goto out;
}
ret = adf_ctl_is_device_in_use(ctl_data->device_id);
if (ret)
goto out;
if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
pr_info("QAT: Stopping all acceleration devices.\n");
else
pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
ctl_data->device_id);
adf_ctl_stop_devices(ctl_data->device_id);
out:
kfree(ctl_data);
return ret;
}
static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
ret = adf_ctl_alloc_resources(&ctl_data, arg);
if (ret)
return ret;
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev)
goto out;
dev_info(&GET_DEV(accel_dev),
"Starting acceleration device qat_dev%d.\n",
ctl_data->device_id);
ret = adf_dev_up(accel_dev, false);
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
ctl_data->device_id);
adf_dev_down(accel_dev, false);
}
out:
kfree(ctl_data);
return ret;
}
static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
unsigned long arg)
{
u32 num_devices = 0;
adf_devmgr_get_num_dev(&num_devices);
if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
return -EFAULT;
return 0;
}
static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
unsigned long arg)
{
struct adf_hw_device_data *hw_data;
struct adf_dev_status_info dev_info;
struct adf_accel_dev *accel_dev;
if (copy_from_user(&dev_info, (void __user *)arg,
sizeof(struct adf_dev_status_info))) {
pr_err("QAT: failed to copy from user.\n");
return -EFAULT;
}
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
if (!accel_dev)
return -ENODEV;
hw_data = accel_dev->hw_device;
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
dev_info.num_ae = hw_data->get_num_aes(hw_data);
dev_info.num_accel = hw_data->get_num_accels(hw_data);
dev_info.num_logical_accel = hw_data->num_logical_accel;
dev_info.banks_per_accel = hw_data->num_banks
/ hw_data->num_logical_accel;
strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
dev_info.instance_id = hw_data->instance_id;
dev_info.type = hw_data->dev_class->type;
dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
if (copy_to_user((void __user *)arg, &dev_info,
sizeof(struct adf_dev_status_info))) {
dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
return -EFAULT;
}
return 0;
}
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int ret;
if (mutex_lock_interruptible(&adf_ctl_lock))
return -EFAULT;
switch (cmd) {
case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
break;
case IOCTL_STOP_ACCEL_DEV:
ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
break;
case IOCTL_START_ACCEL_DEV:
ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
break;
case IOCTL_GET_NUM_DEVICES:
ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
break;
case IOCTL_STATUS_ACCEL_DEV:
ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
break;
default:
pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
ret = -EFAULT;
break;
}
mutex_unlock(&adf_ctl_lock);
return ret;
}
static int __init adf_register_ctl_device_driver(void)
{
if (adf_chr_drv_create())
goto err_chr_dev;
if (adf_init_misc_wq())
goto err_misc_wq;
if (adf_init_aer())
goto err_aer;
if (adf_init_pf_wq())
goto err_pf_wq;
if (adf_init_vf_wq())
goto err_vf_wq;
if (qat_crypto_register())
goto err_crypto_register;
if (qat_compression_register())
goto err_compression_register;
return 0;
err_compression_register:
qat_crypto_unregister();
err_crypto_register:
adf_exit_vf_wq();
err_vf_wq:
adf_exit_pf_wq();
err_pf_wq:
adf_exit_aer();
err_aer:
adf_exit_misc_wq();
err_misc_wq:
adf_chr_drv_destroy();
err_chr_dev:
mutex_destroy(&adf_ctl_lock);
return -EFAULT;
}
static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_misc_wq();
adf_exit_aer();
adf_exit_vf_wq();
adf_exit_pf_wq();
qat_crypto_unregister();
qat_compression_unregister();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
}
module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
static DEFINE_MUTEX(ring_read_lock);
static DEFINE_MUTEX(bank_read_lock);
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
mutex_lock(&ring_read_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
return NULL;
return ring->base_addr +
(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
}
static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
return NULL;
return ring->base_addr +
(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
}
static int adf_ring_show(struct seq_file *sfile, void *v)
{
struct adf_etr_ring_data *ring = sfile->private;
struct adf_etr_bank_data *bank = ring->bank;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
void __iomem *csr = ring->bank->csr_addr;
if (v == SEQ_START_TOKEN) {
int head, tail, empty;
head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
ring->ring_number);
tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
ring->ring_number);
empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
seq_puts(sfile, "------- Ring configuration -------\n");
seq_printf(sfile, "ring name: %s\n",
ring->ring_debug->ring_name);
seq_printf(sfile, "ring num %d, bank num %d\n",
ring->ring_number, ring->bank->bank_number);
seq_printf(sfile, "head %x, tail %x, empty: %d\n",
head, tail, (empty & 1 << ring->ring_number)
>> ring->ring_number);
seq_printf(sfile, "ring size %lld, msg size %d\n",
(long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
seq_puts(sfile, "----------- Ring data ------------\n");
return 0;
}
seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
return 0;
}
static void adf_ring_stop(struct seq_file *sfile, void *v)
{
mutex_unlock(&ring_read_lock);
}
static const struct seq_operations adf_ring_debug_sops = {
.start = adf_ring_start,
.next = adf_ring_next,
.stop = adf_ring_stop,
.show = adf_ring_show
};
DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
{
struct adf_etr_ring_debug_entry *ring_debug;
char entry_name[8];
ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
if (!ring_debug)
return -ENOMEM;
strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
snprintf(entry_name, sizeof(entry_name), "ring_%02d",
ring->ring_number);
ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
ring->bank->bank_debug_dir,
ring, &adf_ring_debug_fops);
ring->ring_debug = ring_debug;
return 0;
}
void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
{
if (ring->ring_debug) {
debugfs_remove(ring->ring_debug->debug);
kfree(ring->ring_debug);
ring->ring_debug = NULL;
}
}
static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_bank_data *bank = sfile->private;
u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
mutex_lock(&bank_read_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
if (*pos >= num_rings_per_bank)
return NULL;
return pos;
}
static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_etr_bank_data *bank = sfile->private;
u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
if (++(*pos) >= num_rings_per_bank)
return NULL;
return pos;
}
static int adf_bank_show(struct seq_file *sfile, void *v)
{
struct adf_etr_bank_data *bank = sfile->private;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
if (v == SEQ_START_TOKEN) {
seq_printf(sfile, "------- Bank %d configuration -------\n",
bank->bank_number);
} else {
int ring_id = *((int *)v) - 1;
struct adf_etr_ring_data *ring = &bank->rings[ring_id];
void __iomem *csr = bank->csr_addr;
int head, tail, empty;
if (!(bank->ring_mask & 1 << ring_id))
return 0;
head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
ring->ring_number);
tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
ring->ring_number);
empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
seq_printf(sfile,
"ring num %02d, head %04x, tail %04x, empty: %d\n",
ring->ring_number, head, tail,
(empty & 1 << ring->ring_number) >>
ring->ring_number);
}
return 0;
}
static void adf_bank_stop(struct seq_file *sfile, void *v)
{
mutex_unlock(&bank_read_lock);
}
static const struct seq_operations adf_bank_debug_sops = {
.start = adf_bank_start,
.next = adf_bank_next,
.stop = adf_bank_stop,
.show = adf_bank_show
};
DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
{
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct dentry *parent = accel_dev->transport->debug;
char name[8];
snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
bank->bank_debug_dir = debugfs_create_dir(name, parent);
bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
bank->bank_debug_dir, bank,
&adf_bank_debug_fops);
return 0;
}
void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
{
debugfs_remove(bank->bank_debug_cfg);
debugfs_remove(bank->bank_debug_dir);
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_transport_debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"
#define QAT_RFC_1950_HDR_SIZE 2
#define QAT_RFC_1950_FOOTER_SIZE 4
#define QAT_RFC_1950_CM_DEFLATE 8
#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
#define QAT_RFC_1950_CM_MASK 0x0f
#define QAT_RFC_1950_CM_OFFSET 4
#define QAT_RFC_1950_DICT_MASK 0x20
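/*
 * Fixed RFC 1950 header prepended to compressed output: CMF 0x78 selects
 * deflate with a 32K window, FLG 0x5e keeps FDICT clear and makes the
 * 16-bit value a multiple of 31 as required by the FCHECK field.
 */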
#define QAT_RFC_1950_COMP_HDR 0x785e
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
enum direction {
DECOMPRESSION = 0,
COMPRESSION = 1,
};
struct qat_compression_req;
struct qat_compression_ctx {
u8 comp_ctx[QAT_COMP_CTX_SIZE];
struct qat_compression_instance *inst;
int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};
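/*
 * Bookkeeping for driver-allocated destination buffers: is_null records
 * that the caller submitted a NULL dst scatterlist, resubmitted that the
 * request has already been retried once with a larger destination (see
 * qat_comp_resubmit()).
 */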
struct qat_dst {
bool is_null;
int resubmitted;
};
struct qat_compression_req {
u8 req[QAT_COMP_REQ_SIZE];
struct qat_compression_ctx *qat_compression_ctx;
struct acomp_req *acompress_req;
struct qat_request_buffs buf;
enum direction dir;
int actual_dlen;
struct qat_alg_req alg_req;
struct work_struct resubmit;
struct qat_dst dst;
};
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
struct qat_compression_instance *inst,
struct crypto_async_request *base)
{
struct qat_alg_req *alg_req = &qat_req->alg_req;
alg_req->fw_req = (u32 *)&qat_req->req;
alg_req->tx_ring = inst->dc_tx;
alg_req->base = base;
alg_req->backlog = &inst->backlog;
return qat_alg_send_message(alg_req);
}
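/*
 * Deferred work used when a NULL-dst decompression overflowed the
 * driver-allocated destination: the buffer is reallocated and remapped at
 * CRYPTO_ACOMP_DST_MAX bytes and the firmware request is resubmitted from
 * process context.
 */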
static void qat_comp_resubmit(struct work_struct *work)
{
struct qat_compression_req *qat_req =
container_of(work, struct qat_compression_req, resubmit);
struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
struct qat_request_buffs *qat_bufs = &qat_req->buf;
struct qat_compression_instance *inst = ctx->inst;
struct acomp_req *areq = qat_req->acompress_req;
struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
u8 *req = qat_req->req;
dma_addr_t dfbuf;
int ret;
areq->dlen = dlen;
dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
qat_algs_alloc_flags(&areq->base));
if (ret)
goto err;
qat_req->dst.resubmitted = true;
dfbuf = qat_req->buf.bloutp;
qat_comp_override_dst(req, dfbuf, dlen);
ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
if (ret != -ENOSPC)
return;
err:
qat_bl_free_bufl(accel_dev, qat_bufs);
acomp_request_complete(areq, ret);
}
static int parse_zlib_header(u16 zlib_h)
{
int ret = -EINVAL;
__be16 header;
u8 *header_p;
u8 cmf, flg;
header = cpu_to_be16(zlib_h);
header_p = (u8 *)&header;
flg = header_p[0];
cmf = header_p[1];
if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
return ret;
if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
return ret;
if (flg & QAT_RFC_1950_DICT_MASK)
return ret;
return 0;
}
static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
void *resp)
{
struct acomp_req *areq = qat_req->acompress_req;
enum direction dir = qat_req->dir;
__be32 qat_produced_adler;
qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
if (dir == COMPRESSION) {
__be16 zlib_header;
zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
areq->dlen += QAT_RFC_1950_HDR_SIZE;
scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
QAT_RFC_1950_FOOTER_SIZE, 1);
areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
} else {
__be32 decomp_adler;
int footer_offset;
int consumed;
consumed = qat_comp_get_consumed_ctr(resp);
footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
return -EBADMSG;
scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
QAT_RFC_1950_FOOTER_SIZE, 0);
if (qat_produced_adler != decomp_adler)
return -EBADMSG;
}
return 0;
}
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
void *resp)
{
struct acomp_req *areq = qat_req->acompress_req;
struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
struct qat_compression_instance *inst = ctx->inst;
int consumed, produced;
s8 cmp_err, xlt_err;
int res = -EBADMSG;
int status;
u8 cnv;
status = qat_comp_get_cmp_status(resp);
status |= qat_comp_get_xlt_status(resp);
cmp_err = qat_comp_get_cmp_err(resp);
xlt_err = qat_comp_get_xlt_err(resp);
consumed = qat_comp_get_consumed_ctr(resp);
produced = qat_comp_get_produced_ctr(resp);
dev_dbg(&GET_DEV(accel_dev),
"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
qat_req->dir == COMPRESSION ? "comp " : "decomp",
status ? "ERR" : "OK ",
areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
areq->dlen = 0;
if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
if (qat_req->dst.resubmitted) {
dev_dbg(&GET_DEV(accel_dev),
"Output does not fit destination buffer\n");
res = -EOVERFLOW;
goto end;
}
INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
adf_misc_wq_queue_work(&qat_req->resubmit);
return;
}
}
if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
goto end;
if (qat_req->dir == COMPRESSION) {
cnv = qat_comp_get_cmp_cnv_flag(resp);
if (unlikely(!cnv)) {
dev_err(&GET_DEV(accel_dev),
"Verified compression not supported\n");
goto end;
}
if (unlikely(produced > qat_req->actual_dlen)) {
memset(inst->dc_data->ovf_buff, 0,
inst->dc_data->ovf_buff_sz);
dev_dbg(&GET_DEV(accel_dev),
"Actual buffer overflow: produced=%d, dlen=%d\n",
produced, qat_req->actual_dlen);
goto end;
}
}
res = 0;
areq->dlen = produced;
if (ctx->qat_comp_callback)
res = ctx->qat_comp_callback(qat_req, resp);
end:
qat_bl_free_bufl(accel_dev, &qat_req->buf);
acomp_request_complete(areq, res);
}
void qat_comp_alg_callback(void *resp)
{
struct qat_compression_req *qat_req =
(void *)(__force long)qat_comp_get_opaque(resp);
struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_comp_generic_callback(qat_req, resp);
qat_alg_send_backlog(backlog);
}
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_compression_instance *inst;
int node;
if (tfm->node == NUMA_NO_NODE)
node = numa_node_id();
else
node = tfm->node;
memset(ctx, 0, sizeof(*ctx));
inst = qat_compression_get_instance_node(node);
if (!inst)
return -EINVAL;
ctx->inst = inst;
ctx->inst->build_deflate_ctx(ctx->comp_ctx);
return 0;
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
qat_compression_put_instance(ctx->inst);
memset(ctx, 0, sizeof(*ctx));
}
static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = qat_comp_alg_init_tfm(acomp_tfm);
ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
return ret;
}
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
unsigned int shdr, unsigned int sftr,
unsigned int dhdr, unsigned int dftr)
{
struct qat_compression_req *qat_req = acomp_request_ctx(areq);
struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_compression_instance *inst = ctx->inst;
gfp_t f = qat_algs_alloc_flags(&areq->base);
struct qat_sgl_to_bufl_params params = {0};
int slen = areq->slen - shdr - sftr;
int dlen = areq->dlen - dhdr - dftr;
dma_addr_t sfbuf, dfbuf;
u8 *req = qat_req->req;
size_t ovf_buff_sz;
int ret;
params.sskip = shdr;
params.dskip = dhdr;
if (!areq->src || !slen)
return -EINVAL;
if (areq->dst && !dlen)
return -EINVAL;
qat_req->dst.is_null = false;
/* Handle acomp requests that require the allocation of a destination
* buffer. The size of the destination buffer is double the source
* buffer (rounded up to the size of a page) to fit the decompressed
* output or an expansion on the data for compression.
*/
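	/*
	 * Example with 4K pages (illustrative numbers only): slen = 3000
	 * gives dlen = round_up(6000, 4096) = 8192, before the dhdr/dftr
	 * space is taken off again below.
	 */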
if (!areq->dst) {
qat_req->dst.is_null = true;
dlen = round_up(2 * slen, PAGE_SIZE);
areq->dst = sgl_alloc(dlen, f, NULL);
if (!areq->dst)
return -ENOMEM;
dlen -= dhdr + dftr;
areq->dlen = dlen;
qat_req->dst.resubmitted = false;
}
if (dir == COMPRESSION) {
params.extra_dst_buff = inst->dc_data->ovf_buff_p;
ovf_buff_sz = inst->dc_data->ovf_buff_sz;
params.sz_extra_dst_buff = ovf_buff_sz;
}
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
if (unlikely(ret))
return ret;
sfbuf = qat_req->buf.blp;
dfbuf = qat_req->buf.bloutp;
qat_req->qat_compression_ctx = ctx;
qat_req->acompress_req = areq;
qat_req->dir = dir;
if (dir == COMPRESSION) {
qat_req->actual_dlen = dlen;
dlen += ovf_buff_sz;
qat_comp_create_compression_req(ctx->comp_ctx, req,
(u64)(__force long)sfbuf, slen,
(u64)(__force long)dfbuf, dlen,
(u64)(__force long)qat_req);
} else {
qat_comp_create_decompression_req(ctx->comp_ctx, req,
(u64)(__force long)sfbuf, slen,
(u64)(__force long)dfbuf, dlen,
(u64)(__force long)qat_req);
}
ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
if (ret == -ENOSPC)
qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
return ret;
}
static int qat_comp_alg_compress(struct acomp_req *req)
{
return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}
static int qat_comp_alg_decompress(struct acomp_req *req)
{
return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}
static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
{
if (!req->dst && req->dlen != 0)
return -EINVAL;
if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
return -EINVAL;
return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
QAT_RFC_1950_HDR_SIZE,
QAT_RFC_1950_FOOTER_SIZE);
}
static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
{
struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
u16 zlib_header;
int ret;
if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
return -EBADMSG;
scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
ret = parse_zlib_header(zlib_header);
if (ret) {
dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
return ret;
}
return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
QAT_RFC_1950_FOOTER_SIZE, 0, 0);
}
static struct acomp_alg qat_acomp[] = { {
.base = {
.cra_name = "deflate",
.cra_driver_name = "qat_deflate",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct qat_compression_ctx),
.cra_module = THIS_MODULE,
},
.init = qat_comp_alg_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
.dst_free = sgl_free,
.reqsize = sizeof(struct qat_compression_req),
}, {
.base = {
.cra_name = "zlib-deflate",
.cra_driver_name = "qat_zlib_deflate",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct qat_compression_ctx),
.cra_module = THIS_MODULE,
},
.init = qat_comp_alg_rfc1950_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_rfc1950_compress,
.decompress = qat_comp_alg_rfc1950_decompress,
.dst_free = sgl_free,
.reqsize = sizeof(struct qat_compression_req),
} };
int qat_comp_algs_register(void)
{
int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs == 1)
ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
mutex_unlock(&algs_lock);
return ret;
}
void qat_comp_algs_unregister(void)
{
mutex_lock(&algs_lock);
if (--active_devs == 0)
crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
mutex_unlock(&algs_lock);
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_comp_algs.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#define ADF_VINTSOU_OFFSET 0x204
#define ADF_VINTMSK_OFFSET 0x208
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
static struct workqueue_struct *adf_vf_stop_wq;
struct adf_vf_stop_data {
struct adf_accel_dev *accel_dev;
struct work_struct work;
};
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
}
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
PCI_IRQ_MSI);
if (unlikely(stat < 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to enable MSI interrupt: %d\n", stat);
return stat;
}
return 0;
}
static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
pci_free_irq_vectors(pdev);
}
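/*
 * A "restarting" notification from the PF is handled out of interrupt
 * context: the bottom half queues an adf_vf_stop_data item on
 * adf_vf_stop_wq and this worker brings the VF down, then re-enables
 * PF2VF interrupts so further notifications can be received.
 */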
static void adf_dev_stop_async(struct work_struct *work)
{
struct adf_vf_stop_data *stop_data =
container_of(work, struct adf_vf_stop_data, work);
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
adf_dev_down(accel_dev, false);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
kfree(stop_data);
}
int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
struct adf_vf_stop_data *stop_data;
clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
if (!stop_data) {
dev_err(&GET_DEV(accel_dev),
"Couldn't schedule stop for vf_%d\n",
accel_dev->accel_id);
return -ENOMEM;
}
stop_data->accel_dev = accel_dev;
INIT_WORK(&stop_data->work, adf_dev_stop_async);
queue_work(adf_vf_stop_wq, &stop_data->work);
return 0;
}
static void adf_pf2vf_bh_handler(void *data)
{
struct adf_accel_dev *accel_dev = data;
bool ret;
ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
if (ret)
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
}
static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
(void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
mutex_init(&accel_dev->vf.vf2pf_lock);
return 0;
}
static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
mutex_destroy(&accel_dev->vf.vf2pf_lock);
}
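/*
 * Top half for the single MSI vector shared by bundle (ring) and PF2VF
 * interrupt sources: pending sources are read from VINTSOU, anything
 * masked in VINTMSK is ignored, and the actual handling is deferred to
 * the respective tasklets.
 */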
static irqreturn_t adf_isr(int irq, void *privdata)
{
struct adf_accel_dev *accel_dev = privdata;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
bool handled = false;
u32 v_int, v_mask;
/* Read VF INT source CSR to determine the source of VF interrupt */
v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
/* Read VF INT mask CSR to determine which sources are masked */
v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
/*
* Recompute v_int ignoring sources that are masked. This is to
* avoid rescheduling the tasklet for interrupts already handled
*/
v_int &= ~v_mask;
/* Check for PF2VF interrupt */
if (v_int & ADF_VINTSOU_PF2VF) {
/* Disable PF to VF interrupt */
adf_disable_pf2vf_interrupts(accel_dev);
/* Schedule tasklet to handle interrupt BH */
tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
handled = true;
}
/* Check bundle interrupt */
if (v_int & ADF_VINTSOU_BUN) {
struct adf_etr_data *etr_data = accel_dev->transport;
struct adf_etr_bank_data *bank = &etr_data->banks[0];
/* Disable Flag and Coalesce Ring Interrupts */
csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
bank->bank_number, 0);
tasklet_hi_schedule(&bank->resp_handler);
handled = true;
}
return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
unsigned int cpu;
int ret;
snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
"qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
(void *)accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
accel_dev->vf.irq_name);
return ret;
}
cpu = accel_dev->accel_id % num_online_cpus();
irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
accel_dev->vf.irq_enabled = true;
return ret;
}
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *priv_data = accel_dev->transport;
tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
(unsigned long)priv_data->banks);
return 0;
}
static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *priv_data = accel_dev->transport;
tasklet_disable(&priv_data->banks[0].resp_handler);
tasklet_kill(&priv_data->banks[0].resp_handler);
}
/**
* adf_vf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device virtual function.
*/
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
if (accel_dev->vf.irq_enabled) {
irq_set_affinity_hint(pdev->irq, NULL);
free_irq(pdev->irq, accel_dev);
}
adf_cleanup_bh(accel_dev);
adf_cleanup_pf2vf_bh(accel_dev);
adf_disable_msi(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
/**
* adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device virtual function.
*
* Return: 0 on success, error code otherwise.
*/
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
if (adf_enable_msi(accel_dev))
goto err_out;
if (adf_setup_pf2vf_bh(accel_dev))
goto err_disable_msi;
if (adf_setup_bh(accel_dev))
goto err_cleanup_pf2vf_bh;
if (adf_request_msi_irq(accel_dev))
goto err_cleanup_bh;
return 0;
err_cleanup_bh:
adf_cleanup_bh(accel_dev);
err_cleanup_pf2vf_bh:
adf_cleanup_pf2vf_bh(accel_dev);
err_disable_msi:
adf_disable_msi(accel_dev);
err_out:
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
/**
* adf_flush_vf_wq() - Flush workqueue for VF
* @accel_dev: Pointer to acceleration device.
*
* Function disables the PF/VF interrupts on the VF so that no new messages
* are received and flushes the workqueue 'adf_vf_stop_wq'.
*
* Return: void.
*/
void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
adf_disable_pf2vf_interrupts(accel_dev);
flush_workqueue(adf_vf_stop_wq);
}
EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
/**
* adf_init_vf_wq() - Init workqueue for VF
*
 * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
*
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_vf_wq(void)
{
adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
return !adf_vf_stop_wq ? -EFAULT : 0;
}
void adf_exit_vf_wq(void)
{
if (adf_vf_stop_wq)
destroy_workqueue(adf_vf_stop_wq);
adf_vf_stop_wq = NULL;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_vf_isr.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_compression.h"
#include "icp_qat_fw.h"
#define SEC ADF_KERNEL_SEC
static struct service_hndl qat_compression;
void qat_compression_put_instance(struct qat_compression_instance *inst)
{
atomic_dec(&inst->refctr);
adf_dev_put(inst->accel_dev);
}
static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
{
struct qat_compression_instance *inst;
struct list_head *list_ptr, *tmp;
int i;
list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
inst = list_entry(list_ptr,
struct qat_compression_instance, list);
for (i = 0; i < atomic_read(&inst->refctr); i++)
qat_compression_put_instance(inst);
if (inst->dc_tx)
adf_remove_ring(inst->dc_tx);
if (inst->dc_rx)
adf_remove_ring(inst->dc_rx);
list_del(list_ptr);
kfree(inst);
}
return 0;
}
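/*
 * Instance selection: prefer a started device on the requested NUMA node
 * (or one without node affinity) with the lowest reference count, fall
 * back to any started device with compression instances, then return the
 * least-referenced instance of the chosen device.
 */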
struct qat_compression_instance *qat_compression_get_instance_node(int node)
{
struct qat_compression_instance *inst = NULL;
struct adf_accel_dev *accel_dev = NULL;
unsigned long best = ~0;
struct list_head *itr;
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
unsigned long ctr;
int tmp_dev_node;
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
if ((node == tmp_dev_node || tmp_dev_node < 0) &&
adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
accel_dev = tmp_dev;
best = ctr;
}
}
}
if (!accel_dev) {
pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
if (adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->compression_list)) {
accel_dev = tmp_dev;
break;
}
}
}
if (!accel_dev)
return NULL;
best = ~0;
list_for_each(itr, &accel_dev->compression_list) {
struct qat_compression_instance *tmp_inst;
unsigned long ctr;
tmp_inst = list_entry(itr, struct qat_compression_instance, list);
ctr = atomic_read(&tmp_inst->refctr);
if (best > ctr) {
inst = tmp_inst;
best = ctr;
}
}
if (inst) {
if (adf_dev_get(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
return NULL;
}
atomic_inc(&inst->refctr);
}
return inst;
}
static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
{
struct qat_compression_instance *inst;
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
unsigned long num_inst, num_msg_dc;
unsigned long bank;
int msg_size;
int ret;
int i;
INIT_LIST_HEAD(&accel_dev->compression_list);
strscpy(key, ADF_NUM_DC, sizeof(key));
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
return ret;
ret = kstrtoul(val, 10, &num_inst);
if (ret)
return ret;
for (i = 0; i < num_inst; i++) {
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!inst) {
ret = -ENOMEM;
goto err;
}
list_add_tail(&inst->list, &accel_dev->compression_list);
inst->id = i;
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
			goto err;
ret = kstrtoul(val, 10, &bank);
if (ret)
			goto err;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
if (ret)
			goto err;
ret = kstrtoul(val, 10, &num_msg_dc);
if (ret)
			goto err;
msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
msg_size, key, NULL, 0, &inst->dc_tx);
if (ret)
			goto err;
msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
msg_size, key, qat_comp_alg_callback, 0,
&inst->dc_rx);
if (ret)
			goto err;
inst->dc_data = accel_dev->dc_data;
INIT_LIST_HEAD(&inst->backlog.list);
spin_lock_init(&inst->backlog.lock);
}
return 0;
err:
qat_compression_free_instances(accel_dev);
return ret;
}
static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
{
struct device *dev = &GET_DEV(accel_dev);
dma_addr_t obuff_p = DMA_MAPPING_ERROR;
size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
struct adf_dc_data *dc_data = NULL;
u8 *obuff = NULL;
dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
if (!dc_data)
goto err;
obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
if (!obuff)
goto err;
obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, obuff_p)))
goto err;
dc_data->ovf_buff = obuff;
dc_data->ovf_buff_p = obuff_p;
dc_data->ovf_buff_sz = ovf_buff_sz;
accel_dev->dc_data = dc_data;
return 0;
err:
accel_dev->dc_data = NULL;
kfree(obuff);
devm_kfree(dev, dc_data);
return -ENOMEM;
}
static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
{
struct adf_dc_data *dc_data = accel_dev->dc_data;
struct device *dev = &GET_DEV(accel_dev);
if (!dc_data)
return;
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
DMA_FROM_DEVICE);
kfree_sensitive(dc_data->ovf_buff);
devm_kfree(dev, dc_data);
accel_dev->dc_data = NULL;
}
static int qat_compression_init(struct adf_accel_dev *accel_dev)
{
int ret;
ret = qat_compression_alloc_dc_data(accel_dev);
if (ret)
return ret;
ret = qat_compression_create_instances(accel_dev);
if (ret)
qat_free_dc_data(accel_dev);
return ret;
}
static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
{
qat_free_dc_data(accel_dev);
return qat_compression_free_instances(accel_dev);
}
static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
enum adf_event event)
{
int ret;
switch (event) {
case ADF_EVENT_INIT:
ret = qat_compression_init(accel_dev);
break;
case ADF_EVENT_SHUTDOWN:
ret = qat_compression_shutdown(accel_dev);
break;
case ADF_EVENT_RESTARTING:
case ADF_EVENT_RESTARTED:
case ADF_EVENT_START:
case ADF_EVENT_STOP:
default:
ret = 0;
}
return ret;
}
int qat_compression_register(void)
{
memset(&qat_compression, 0, sizeof(qat_compression));
qat_compression.event_hld = qat_compression_event_handler;
qat_compression.name = "qat_compression";
return adf_service_register(&qat_compression);
}
int qat_compression_unregister(void)
{
return adf_service_unregister(&qat_compression);
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_compression.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/bitfield.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_msg.h"
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
NULL, /* no message type defined for value 0 */
NULL, /* no message type defined for value 1 */
adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
adf_pf_ring_to_svc_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
};
/**
* adf_send_pf2vf_msg() - send PF to VF message
* @accel_dev: Pointer to acceleration device
* @vf_nr: VF number to which the message will be sent
* @msg: Message to send
*
* This function allows the PF to send a message to a specific VF.
*
* Return: 0 on success, error code otherwise.
*/
int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
{
struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
&accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
}
/**
* adf_recv_vf2pf_msg() - receive a VF to PF message
* @accel_dev: Pointer to acceleration device
* @vf_nr: Number of the VF from where the message will be received
*
* This function allows the PF to receive a message from a specific VF.
*
* Return: a valid message on success, zero otherwise.
*/
static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
{
struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
}
static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
{
if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
return NULL;
return pf2vf_blkmsg_providers[type];
}
/* Byte pf2vf_blkmsg_data_getter_fn callback */
static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
{
return blkmsg[index];
}
/* CRC pf2vf_blkmsg_data_getter_fn callback */
static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
{
/* count is 0-based, turn it into a length */
return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
}
static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
u8 type, u8 byte, u8 max_size, u8 *data,
pf2vf_blkmsg_data_getter_fn data_getter)
{
u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
struct adf_accel_dev *accel_dev = vf_info->accel_dev;
adf_pf2vf_blkmsg_provider provider;
u8 msg_size;
provider = get_blkmsg_response_provider(type);
if (unlikely(!provider)) {
pr_err("QAT: No registered provider for message %d\n", type);
*data = ADF_PF2VF_INVALID_BLOCK_TYPE;
return -EINVAL;
}
if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
pr_err("QAT: unknown error from provider for message %d\n", type);
*data = ADF_PF2VF_UNSPECIFIED_ERROR;
return -EINVAL;
}
msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
if (unlikely(msg_size >= max_size)) {
pr_err("QAT: Invalid size %d provided for message type %d\n",
msg_size, type);
*data = ADF_PF2VF_PAYLOAD_TRUNCATED;
return -EINVAL;
}
if (unlikely(byte >= msg_size)) {
pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
byte, msg_size);
*data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
return -EINVAL;
}
*data = data_getter(blkmsg, byte);
return 0;
}
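/*
 * Block message requests come in small, medium and large variants whose
 * type ranges are stacked on top of each other (medium types follow the
 * small range, large types follow the medium range) and which differ in
 * the maximum byte index they can address. Each request returns either
 * one data byte or the CRC over the first N bytes of the block, so the VF
 * can fetch and verify a whole block one message at a time.
 */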
static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
struct pfvf_message req)
{
u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
struct pfvf_message resp = { 0 };
u8 resp_data = 0;
u8 blk_type;
u8 blk_byte;
u8 byte_max;
switch (req.type) {
case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
+ ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
break;
case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
+ ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
break;
case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
break;
}
/* Is this a request for CRC or data? */
if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
dev_dbg(&GET_DEV(vf_info->accel_dev),
"BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
blk_type, blk_byte + 1, vf_info->vf_nr);
if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
byte_max, &resp_data,
adf_pf2vf_blkmsg_get_crc))
resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
} else {
dev_dbg(&GET_DEV(vf_info->accel_dev),
"BlockMsg of type %d for data byte %d received from VF%d\n",
blk_type, blk_byte, vf_info->vf_nr);
if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
byte_max, &resp_data,
adf_pf2vf_blkmsg_get_byte))
resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
}
resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
return resp;
}
static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
struct pfvf_message req)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct pfvf_message resp = {
.type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
.data = RPRESET_SUCCESS
};
u32 bank_number;
u32 rsvd_field;
bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
dev_dbg(&GET_DEV(accel_dev),
"Ring Pair Reset Message received from VF%d for bank 0x%x\n",
vf_nr, bank_number);
if (!hw_data->ring_pair_reset || rsvd_field) {
dev_dbg(&GET_DEV(accel_dev),
"Ring Pair Reset for VF%d is not supported\n", vf_nr);
resp.data = RPRESET_NOT_SUPPORTED;
goto out;
}
if (bank_number >= hw_data->num_banks_per_vf) {
dev_err(&GET_DEV(accel_dev),
"Invalid bank number (0x%x) from VF%d for Ring Reset\n",
bank_number, vf_nr);
resp.data = RPRESET_INVAL_BANK;
goto out;
}
/* Convert the VF provided value to PF bank number */
bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
dev_dbg(&GET_DEV(accel_dev),
"Ring pair reset for VF%d failure\n", vf_nr);
resp.data = RPRESET_TIMEOUT;
goto out;
}
dev_dbg(&GET_DEV(accel_dev),
"Ring pair reset for VF%d successfully\n", vf_nr);
out:
return resp;
}
static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
struct pfvf_message msg, struct pfvf_message *resp)
{
struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
switch (msg.type) {
case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
{
u8 vf_compat_ver = msg.data;
u8 compat;
dev_dbg(&GET_DEV(accel_dev),
"VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
if (vf_compat_ver == 0)
compat = ADF_PF2VF_VF_INCOMPATIBLE;
else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
compat = ADF_PF2VF_VF_COMPATIBLE;
else
compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
vf_info->vf_compat_ver = vf_compat_ver;
resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
ADF_PFVF_COMPAT_THIS_VERSION) |
FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
}
break;
case ADF_VF2PF_MSGTYPE_VERSION_REQ:
{
u8 compat;
dev_dbg(&GET_DEV(accel_dev),
"Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
vf_nr);
/* legacy driver, VF compat_ver is 0 */
vf_info->vf_compat_ver = 0;
/* PF always newer than legacy VF */
compat = ADF_PF2VF_VF_COMPATIBLE;
/* Set legacy major and minor version to the latest, 1.1 */
resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
}
break;
case ADF_VF2PF_MSGTYPE_INIT:
{
dev_dbg(&GET_DEV(accel_dev),
"Init message received from VF%d\n", vf_nr);
vf_info->init = true;
}
break;
case ADF_VF2PF_MSGTYPE_SHUTDOWN:
{
dev_dbg(&GET_DEV(accel_dev),
"Shutdown message received from VF%d\n", vf_nr);
vf_info->init = false;
}
break;
case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
*resp = handle_blkmsg_req(vf_info, msg);
break;
case ADF_VF2PF_MSGTYPE_RP_RESET:
*resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
break;
default:
dev_dbg(&GET_DEV(accel_dev),
"Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
vf_nr, msg.type, msg.data);
return -ENOMSG;
}
return 0;
}
bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
{
struct pfvf_message req;
struct pfvf_message resp = {0};
req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
if (!req.type) /* Legacy or no message */
return true;
if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
return false;
if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
dev_err(&GET_DEV(accel_dev),
"Failed to send response to VF%d\n", vf_nr);
return true;
}
/**
* adf_enable_pf2vf_comms() - Function enables communication from pf to vf
*
 * @accel_dev: Pointer to acceleration device.
 *
 * This function carries out the necessary steps to set up and start the PFVF
* communication channel, if any.
*
* Return: 0 on success, error code otherwise.
*/
int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
adf_pfvf_crc_init();
spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
return 0;
}
EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_gen2_config.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "qat_compression.h"
#include "adf_heartbeat.h"
#include "adf_transport_access_macros.h"
static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
int cpus = num_online_cpus();
unsigned long val;
int instances;
int ret;
int i;
if (adf_hw_dev_has_crypto(accel_dev))
instances = min(cpus, banks);
else
instances = 0;
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
val = 128;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 512;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 0;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 2;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 10;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
key, &val, ADF_DEC);
if (ret)
goto err;
}
val = i;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
&val, ADF_DEC);
if (ret)
goto err;
return ret;
err:
dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
return ret;
}
static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
int cpus = num_online_cpus();
unsigned long val;
int instances;
int ret;
int i;
if (adf_hw_dev_has_compression(accel_dev))
instances = min(cpus, banks);
else
instances = 0;
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 512;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 6;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 14;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
}
val = i;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
&val, ADF_DEC);
	return ret;
err:
dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
return ret;
}
/**
* adf_gen2_dev_config() - create dev config required to create instances
*
* @accel_dev: Pointer to acceleration device.
*
* Function creates device configuration required to create instances
*
* Return: 0 on success, error code otherwise.
*/
int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
{
int ret;
ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
if (ret)
goto err;
ret = adf_cfg_section_add(accel_dev, "Accelerator0");
if (ret)
goto err;
ret = adf_gen2_crypto_dev_config(accel_dev);
if (ret)
goto err;
ret = adf_gen2_comp_dev_config(accel_dev);
if (ret)
goto err;
ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
if (ret)
goto err;
adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_DEFAULT_MS);
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return ret;
err:
dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen2_config.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/types.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_heartbeat.h"
#include "adf_heartbeat_dbgfs.h"
#define HB_OK 0
#define HB_ERROR -1
#define HB_STATUS_MAX_STRLEN 4
#define HB_STATS_MAX_STRLEN 16
static ssize_t adf_hb_stats_read(struct file *file, char __user *user_buffer,
size_t count, loff_t *ppos)
{
char buf[HB_STATS_MAX_STRLEN];
unsigned int *value;
int len;
if (*ppos > 0)
return 0;
value = file->private_data;
len = scnprintf(buf, sizeof(buf), "%u\n", *value);
return simple_read_from_buffer(user_buffer, count, ppos, buf, len + 1);
}
static const struct file_operations adf_hb_stats_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = adf_hb_stats_read,
};
static ssize_t adf_hb_status_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
enum adf_device_heartbeat_status hb_status;
char ret_str[HB_STATUS_MAX_STRLEN];
struct adf_accel_dev *accel_dev;
int ret_code;
size_t len;
if (*ppos > 0)
return 0;
accel_dev = file->private_data;
ret_code = HB_OK;
adf_heartbeat_status(accel_dev, &hb_status);
if (hb_status != HB_DEV_ALIVE)
ret_code = HB_ERROR;
len = scnprintf(ret_str, sizeof(ret_str), "%d\n", ret_code);
return simple_read_from_buffer(user_buf, count, ppos, ret_str, len + 1);
}
static const struct file_operations adf_hb_status_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = adf_hb_status_read,
};
static ssize_t adf_hb_cfg_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
struct adf_accel_dev *accel_dev;
unsigned int timer_ms;
int len;
if (*ppos > 0)
return 0;
accel_dev = file->private_data;
timer_ms = accel_dev->heartbeat->hb_timer;
len = scnprintf(timer_str, sizeof(timer_str), "%u\n", timer_ms);
return simple_read_from_buffer(user_buf, count, ppos, timer_str,
len + 1);
}
static ssize_t adf_hb_cfg_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
char input_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
struct adf_accel_dev *accel_dev;
int ret, written_chars;
unsigned int timer_ms;
u32 ticks;
accel_dev = file->private_data;
timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
/* last byte left as string termination */
if (count > sizeof(input_str) - 1)
return -EINVAL;
written_chars = simple_write_to_buffer(input_str, sizeof(input_str) - 1,
ppos, user_buf, count);
if (written_chars > 0) {
ret = kstrtouint(input_str, 10, &timer_ms);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"heartbeat_cfg: Invalid value\n");
return ret;
}
if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
dev_err(&GET_DEV(accel_dev),
"heartbeat_cfg: Invalid value\n");
return -EINVAL;
}
/*
* On 4xxx devices adf_timer is responsible for HB updates and
* its period is fixed to 200ms
*/
if (accel_dev->timer)
timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
ret = adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
if (ret)
return ret;
ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
if (ret)
return ret;
ret = adf_send_admin_hb_timer(accel_dev, ticks);
if (ret)
return ret;
accel_dev->heartbeat->hb_timer = timer_ms;
}
return written_chars;
}
static const struct file_operations adf_hb_cfg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = adf_hb_cfg_read,
.write = adf_hb_cfg_write,
};
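/*
 * Creates a "heartbeat" directory under the device's debugfs directory
 * with four entries: "status" (0 when the device is alive, -1 otherwise),
 * "queries_sent", "queries_failed" and a writable "config" holding the
 * heartbeat timer period in milliseconds.
 */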
void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb = accel_dev->heartbeat;
if (!hb)
return;
hb->dbgfs.base_dir = debugfs_create_dir("heartbeat", accel_dev->debugfs_dir);
hb->dbgfs.status = debugfs_create_file("status", 0400, hb->dbgfs.base_dir,
accel_dev, &adf_hb_status_fops);
hb->dbgfs.sent = debugfs_create_file("queries_sent", 0400, hb->dbgfs.base_dir,
&hb->hb_sent_counter, &adf_hb_stats_fops);
hb->dbgfs.failed = debugfs_create_file("queries_failed", 0400, hb->dbgfs.base_dir,
&hb->hb_failed_counter, &adf_hb_stats_fops);
hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
accel_dev, &adf_hb_cfg_fops);
}
EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb = accel_dev->heartbeat;
if (!hb)
return;
debugfs_remove(hb->dbgfs.status);
hb->dbgfs.status = NULL;
debugfs_remove(hb->dbgfs.sent);
hb->dbgfs.sent = NULL;
debugfs_remove(hb->dbgfs.failed);
hb->dbgfs.failed = NULL;
debugfs_remove(hb->dbgfs.cfg);
hb->dbgfs.cfg = NULL;
debugfs_remove(hb->dbgfs.base_dir);
hb->dbgfs.base_dir = NULL;
}
EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_rm);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
if (!self || !self->accel_mask)
return 0;
return hweight16(self->accel_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
{
if (!self || !self->ae_mask)
return 0;
return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned long accel_mask = hw_data->accel_mask;
unsigned long ae_mask = hw_data->ae_mask;
unsigned int val, i;
/* Enable Accel Engine error detection & correction */
for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
}
/* Enable shared memory error detection & correction */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 reg;
int i;
/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
for (i = 0; i < num_a_regs; i++) {
reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
if (enable)
reg |= AE2FUNCTION_MAP_VALID;
else
reg &= ~AE2FUNCTION_MAP_VALID;
WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
}
/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
for (i = 0; i < num_b_regs; i++) {
reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
if (enable)
reg |= AE2FUNCTION_MAP_VALID;
else
reg &= ~AE2FUNCTION_MAP_VALID;
WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
{
admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info)
{
arb_info->arb_cfg = ADF_ARB_CONFIG;
arb_info->arb_offset = ADF_ARB_OFFSET;
arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr = adf_get_pmisc_base(accel_dev);
u32 val;
val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
/* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
return BUILD_RING_BASE_ADDR(addr, size);
}
static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
}
static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
u32 value)
{
WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
}
static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
}
static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
u32 value)
{
WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}
static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
return READ_CSR_E_STAT(csr_base_addr, bank);
}
static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value)
{
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
dma_addr_t addr)
{
WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}
static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}
static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
{
WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
}
static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}
static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}
static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}
static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}
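/*
 * Populate the CSR ops table with thin wrappers around the gen2 transport
 * access macros, so generation-agnostic ring code can program ring bases,
 * heads, tails and interrupt coalescing without knowing the register
 * layout.
 */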
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
{
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
csr_ops->read_csr_ring_tail = read_csr_ring_tail;
csr_ops->write_csr_ring_tail = write_csr_ring_tail;
csr_ops->read_csr_e_stat = read_csr_e_stat;
csr_ops->write_csr_ring_config = write_csr_ring_config;
csr_ops->write_csr_ring_base = write_csr_ring_base;
csr_ops->write_csr_int_flag = write_csr_int_flag;
csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
csr_ops->write_csr_int_col_en = write_csr_int_col_en;
csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
u32 straps = hw_data->straps;
u32 fuses = hw_data->fuses;
u32 legfuses;
u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
ICP_ACCEL_CAPABILITIES_CIPHER |
ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
/* A set bit in legfuses means the feature is OFF in this SKU */
if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
if ((straps | fuses) & ADF_POWERGATE_PKE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
if ((straps | fuses) & ADF_POWERGATE_DC)
capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
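/*
 * Program the slice watchdog timers for every enabled accelerator: one
 * value shared by the symmetric crypto and compression slices and a
 * separate value for the PKE slice.
 */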
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
unsigned long accel_mask = hw_data->accel_mask;
u32 i = 0;
/* Configures WDT timers */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
/* Enable WDT for sym and dc */
ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
/* Enable WDT for pke */
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
struct qat_rsa_input_params {
union {
struct {
dma_addr_t m;
dma_addr_t e;
dma_addr_t n;
} enc;
struct {
dma_addr_t c;
dma_addr_t d;
dma_addr_t n;
} dec;
struct {
dma_addr_t c;
dma_addr_t p;
dma_addr_t q;
dma_addr_t dp;
dma_addr_t dq;
dma_addr_t qinv;
} dec_crt;
u64 in_tab[8];
};
} __packed __aligned(64);
struct qat_rsa_output_params {
union {
struct {
dma_addr_t c;
} enc;
struct {
dma_addr_t m;
} dec;
u64 out_tab[8];
};
} __packed __aligned(64);
struct qat_rsa_ctx {
char *n;
char *e;
char *d;
char *p;
char *q;
char *dp;
char *dq;
char *qinv;
dma_addr_t dma_n;
dma_addr_t dma_e;
dma_addr_t dma_d;
dma_addr_t dma_p;
dma_addr_t dma_q;
dma_addr_t dma_dp;
dma_addr_t dma_dq;
dma_addr_t dma_qinv;
unsigned int key_sz;
bool crt_mode;
struct qat_crypto_instance *inst;
} __packed __aligned(64);
struct qat_dh_input_params {
union {
struct {
dma_addr_t b;
dma_addr_t xa;
dma_addr_t p;
} in;
struct {
dma_addr_t xa;
dma_addr_t p;
} in_g2;
u64 in_tab[8];
};
} __packed __aligned(64);
struct qat_dh_output_params {
union {
dma_addr_t r;
u64 out_tab[8];
};
} __packed __aligned(64);
struct qat_dh_ctx {
char *g;
char *xa;
char *p;
dma_addr_t dma_g;
dma_addr_t dma_xa;
dma_addr_t dma_p;
unsigned int p_size;
bool g2;
struct qat_crypto_instance *inst;
} __packed __aligned(64);
struct qat_asym_request {
union {
struct qat_rsa_input_params rsa;
struct qat_dh_input_params dh;
} in;
union {
struct qat_rsa_output_params rsa;
struct qat_dh_output_params dh;
} out;
dma_addr_t phy_in;
dma_addr_t phy_out;
char *src_align;
char *dst_align;
struct icp_qat_fw_pke_request req;
union {
struct qat_rsa_ctx *rsa;
struct qat_dh_ctx *dh;
} ctx;
union {
struct akcipher_request *rsa;
struct kpp_request *dh;
} areq;
int err;
void (*cb)(struct icp_qat_fw_pke_resp *resp);
struct qat_alg_req alg_req;
} __aligned(64);
static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
struct qat_crypto_instance *inst,
struct crypto_async_request *base)
{
struct qat_alg_req *alg_req = &qat_req->alg_req;
alg_req->fw_req = (u32 *)&qat_req->req;
alg_req->tx_ring = inst->pke_tx;
alg_req->base = base;
alg_req->backlog = &inst->backlog;
return qat_alg_send_message(alg_req);
}
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
struct qat_asym_request *req = (void *)(__force long)resp->opaque;
struct kpp_request *areq = req->areq.dh;
struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
resp->pke_resp_hdr.comn_resp_flags);
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
if (areq->src) {
dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
DMA_TO_DEVICE);
kfree_sensitive(req->src_align);
}
areq->dst_len = req->ctx.dh->p_size;
dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
DMA_FROM_DEVICE);
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
kfree_sensitive(req->dst_align);
}
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
kpp_request_complete(areq, err);
}
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
unsigned int bitslen = len << 3;
switch (bitslen) {
case 1536:
return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
case 2048:
return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
case 3072:
return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
case 4096:
return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
default:
return 0;
}
}
static int qat_dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
struct qat_asym_request *qat_req =
PTR_ALIGN(kpp_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
gfp_t flags = qat_algs_alloc_flags(&req->base);
int n_input_params = 0;
u8 *vaddr;
int ret;
if (unlikely(!ctx->xa))
return -EINVAL;
if (req->dst_len < ctx->p_size) {
req->dst_len = ctx->p_size;
return -EOVERFLOW;
}
if (req->src_len > ctx->p_size)
return -EINVAL;
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
!req->src && ctx->g2);
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
qat_req->cb = qat_dh_cb;
qat_req->ctx.dh = ctx;
qat_req->areq.dh = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
/*
* If no source is provided use g as base
*/
if (req->src) {
qat_req->in.dh.in.xa = ctx->dma_xa;
qat_req->in.dh.in.p = ctx->dma_p;
n_input_params = 3;
} else {
if (ctx->g2) {
qat_req->in.dh.in_g2.xa = ctx->dma_xa;
qat_req->in.dh.in_g2.p = ctx->dma_p;
n_input_params = 2;
} else {
qat_req->in.dh.in.b = ctx->dma_g;
qat_req->in.dh.in.xa = ctx->dma_xa;
qat_req->in.dh.in.p = ctx->dma_p;
n_input_params = 3;
}
}
ret = -ENOMEM;
if (req->src) {
		/*
		 * src can be any size within the valid range, but the HW
		 * expects exactly p_size bytes, so if the length differs
		 * we allocate a zero-padded bounce buffer and copy the
		 * src data into it. Otherwise the user-provided buffer is
		 * mapped directly; it must also be physically contiguous.
		 */
if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
qat_req->src_align = NULL;
vaddr = sg_virt(req->src);
} else {
int shift = ctx->p_size - req->src_len;
qat_req->src_align = kzalloc(ctx->p_size, flags);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift,
req->src, 0, req->src_len, 0);
vaddr = qat_req->src_align;
}
qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
goto unmap_src;
}
	/*
	 * dst can be any size within the valid range, but the HW writes
	 * exactly p_size bytes, so if the user buffer differs in size or is
	 * not physically contiguous we allocate a bounce buffer here and
	 * copy the result back to the scatterlist in the callback.
	 */
if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
qat_req->dst_align = NULL;
vaddr = sg_virt(req->dst);
} else {
qat_req->dst_align = kzalloc(ctx->p_size, flags);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
vaddr = qat_req->dst_align;
}
qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
goto unmap_dst;
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = n_input_params;
msg->output_param_count = 1;
ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
if (ret == -ENOSPC)
goto unmap_all;
return ret;
unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
unmap_in_params:
if (!dma_mapping_error(dev, qat_req->phy_in))
dma_unmap_single(dev, qat_req->phy_in,
sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.dh.r))
dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
DMA_FROM_DEVICE);
kfree_sensitive(qat_req->dst_align);
unmap_src:
if (req->src) {
if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
dma_unmap_single(dev, qat_req->in.dh.in.b,
ctx->p_size,
DMA_TO_DEVICE);
kfree_sensitive(qat_req->src_align);
}
return ret;
}
static int qat_dh_check_params_length(unsigned int p_len)
{
switch (p_len) {
case 1536:
case 2048:
case 3072:
case 4096:
return 0;
}
return -EINVAL;
}
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
if (qat_dh_check_params_length(params->p_size << 3))
return -EINVAL;
ctx->p_size = params->p_size;
ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
return -ENOMEM;
memcpy(ctx->p, params->p, ctx->p_size);
/* If g equals 2 don't copy it */
if (params->g_size == 1 && *(char *)params->g == 0x02) {
ctx->g2 = true;
return 0;
}
ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
if (!ctx->g)
return -ENOMEM;
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
params->g_size);
return 0;
}
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
if (ctx->g) {
memset(ctx->g, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
ctx->g = NULL;
}
if (ctx->xa) {
memset(ctx->xa, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
ctx->xa = NULL;
}
if (ctx->p) {
memset(ctx->p, 0, ctx->p_size);
dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
ctx->p = NULL;
}
ctx->p_size = 0;
ctx->g2 = false;
}
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
struct dh params;
int ret;
	if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
/* Free old secret if any */
qat_dh_clear_ctx(dev, ctx);
	ret = qat_dh_set_params(ctx, &params);
if (ret < 0)
goto err_clear_ctx;
ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
GFP_KERNEL);
if (!ctx->xa) {
ret = -ENOMEM;
goto err_clear_ctx;
}
memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
params.key_size);
return 0;
err_clear_ctx:
qat_dh_clear_ctx(dev, ctx);
return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
return ctx->p_size;
}
static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(numa_node_id());
if (!inst)
return -EINVAL;
kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
ctx->p_size = 0;
ctx->g2 = false;
ctx->inst = inst;
return 0;
}
static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
qat_dh_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
}
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
struct qat_asym_request *req = (void *)(__force long)resp->opaque;
struct akcipher_request *areq = req->areq.rsa;
struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
resp->pke_resp_hdr.comn_resp_flags);
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
DMA_TO_DEVICE);
kfree_sensitive(req->src_align);
areq->dst_len = req->ctx.rsa->key_sz;
dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
DMA_FROM_DEVICE);
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
kfree_sensitive(req->dst_align);
}
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
akcipher_request_complete(areq, err);
}
void qat_alg_asym_callback(void *_resp)
{
struct icp_qat_fw_pke_resp *resp = _resp;
struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
struct qat_instance_backlog *backlog = areq->alg_req.backlog;
areq->cb(resp);
qat_alg_send_backlog(backlog);
}
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
unsigned int bitslen = len << 3;
switch (bitslen) {
case 512:
return PKE_RSA_EP_512;
case 1024:
return PKE_RSA_EP_1024;
case 1536:
return PKE_RSA_EP_1536;
case 2048:
return PKE_RSA_EP_2048;
case 3072:
return PKE_RSA_EP_3072;
case 4096:
return PKE_RSA_EP_4096;
default:
return 0;
}
}
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
unsigned int bitslen = len << 3;
switch (bitslen) {
case 512:
return PKE_RSA_DP1_512;
case 1024:
return PKE_RSA_DP1_1024;
case 1536:
return PKE_RSA_DP1_1536;
case 2048:
return PKE_RSA_DP1_2048;
case 3072:
return PKE_RSA_DP1_3072;
case 4096:
return PKE_RSA_DP1_4096;
default:
return 0;
}
}
#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2
static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
unsigned int bitslen = len << 3;
switch (bitslen) {
case 512:
return PKE_RSA_DP2_512;
case 1024:
return PKE_RSA_DP2_1024;
case 1536:
return PKE_RSA_DP2_1536;
case 2048:
return PKE_RSA_DP2_2048;
case 3072:
return PKE_RSA_DP2_3072;
case 4096:
return PKE_RSA_DP2_4096;
default:
return 0;
}
}
static int qat_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
gfp_t flags = qat_algs_alloc_flags(&req->base);
u8 *vaddr;
int ret;
if (unlikely(!ctx->n || !ctx->e))
return -EINVAL;
if (req->dst_len < ctx->key_sz) {
req->dst_len = ctx->key_sz;
return -EOVERFLOW;
}
if (req->src_len > ctx->key_sz)
return -EINVAL;
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
qat_req->cb = qat_rsa_cb;
qat_req->ctx.rsa = ctx;
qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
qat_req->in.rsa.enc.e = ctx->dma_e;
qat_req->in.rsa.enc.n = ctx->dma_n;
ret = -ENOMEM;
	/*
	 * src can be any size within the valid range, but the HW expects
	 * exactly key_sz bytes (the size of the modulus n), so if the length
	 * differs we allocate a zero-padded bounce buffer and copy the src
	 * data into it. Otherwise the user-provided buffer is mapped
	 * directly; it must also be physically contiguous.
	 */
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
vaddr = qat_req->src_align;
}
qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
goto unmap_src;
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
vaddr = sg_virt(req->dst);
} else {
qat_req->dst_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
vaddr = qat_req->dst_align;
}
qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
goto unmap_dst;
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
if (ret == -ENOSPC)
goto unmap_all;
return ret;
unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
unmap_in_params:
if (!dma_mapping_error(dev, qat_req->phy_in))
dma_unmap_single(dev, qat_req->phy_in,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
dma_unmap_single(dev, qat_req->out.rsa.enc.c,
ctx->key_sz, DMA_FROM_DEVICE);
kfree_sensitive(qat_req->dst_align);
unmap_src:
if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
DMA_TO_DEVICE);
kfree_sensitive(qat_req->src_align);
return ret;
}
static int qat_rsa_dec(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
gfp_t flags = qat_algs_alloc_flags(&req->base);
u8 *vaddr;
int ret;
if (unlikely(!ctx->n || !ctx->d))
return -EINVAL;
if (req->dst_len < ctx->key_sz) {
req->dst_len = ctx->key_sz;
return -EOVERFLOW;
}
if (req->src_len > ctx->key_sz)
return -EINVAL;
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
qat_rsa_dec_fn_id_crt(ctx->key_sz) :
qat_rsa_dec_fn_id(ctx->key_sz);
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
qat_req->cb = qat_rsa_cb;
qat_req->ctx.rsa = ctx;
qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
if (ctx->crt_mode) {
qat_req->in.rsa.dec_crt.p = ctx->dma_p;
qat_req->in.rsa.dec_crt.q = ctx->dma_q;
qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
} else {
qat_req->in.rsa.dec.d = ctx->dma_d;
qat_req->in.rsa.dec.n = ctx->dma_n;
}
ret = -ENOMEM;
	/*
	 * src can be any size within the valid range, but the HW expects
	 * exactly key_sz bytes (the size of the modulus n), so if the length
	 * differs we allocate a zero-padded bounce buffer and copy the src
	 * data into it. Otherwise the user-provided buffer is mapped
	 * directly; it must also be physically contiguous.
	 */
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
vaddr = qat_req->src_align;
}
qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
goto unmap_src;
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
vaddr = sg_virt(req->dst);
} else {
qat_req->dst_align = kzalloc(ctx->key_sz, flags);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
vaddr = qat_req->dst_align;
}
qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
goto unmap_dst;
if (ctx->crt_mode)
qat_req->in.rsa.in_tab[6] = 0;
else
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
msg->pke_mid.opaque = (u64)(__force long)qat_req;
if (ctx->crt_mode)
msg->input_param_count = 6;
else
msg->input_param_count = 3;
msg->output_param_count = 1;
ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
if (ret == -ENOSPC)
goto unmap_all;
return ret;
unmap_all:
if (!dma_mapping_error(dev, qat_req->phy_out))
dma_unmap_single(dev, qat_req->phy_out,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
unmap_in_params:
if (!dma_mapping_error(dev, qat_req->phy_in))
dma_unmap_single(dev, qat_req->phy_in,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
dma_unmap_single(dev, qat_req->out.rsa.dec.m,
ctx->key_sz, DMA_FROM_DEVICE);
kfree_sensitive(qat_req->dst_align);
unmap_src:
if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
DMA_TO_DEVICE);
kfree_sensitive(qat_req->src_align);
return ret;
}
static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
int ret;
while (!*ptr && vlen) {
ptr++;
vlen--;
}
ctx->key_sz = vlen;
ret = -EINVAL;
/* invalid key size provided */
if (!qat_rsa_enc_fn_id(ctx->key_sz))
goto err;
ret = -ENOMEM;
ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
if (!ctx->n)
goto err;
memcpy(ctx->n, ptr, ctx->key_sz);
return 0;
err:
ctx->key_sz = 0;
ctx->n = NULL;
return ret;
}
static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
while (!*ptr && vlen) {
ptr++;
vlen--;
}
if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
ctx->e = NULL;
return -EINVAL;
}
ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
if (!ctx->e)
return -ENOMEM;
memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
return 0;
}
static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
int ret;
while (!*ptr && vlen) {
ptr++;
vlen--;
}
ret = -EINVAL;
if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
goto err;
ret = -ENOMEM;
ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
if (!ctx->d)
goto err;
memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
return 0;
err:
ctx->d = NULL;
return ret;
}
static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
while (!**ptr && *len) {
(*ptr)++;
(*len)--;
}
}
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr;
unsigned int len;
unsigned int half_key_sz = ctx->key_sz / 2;
/* p */
ptr = rsa_key->p;
len = rsa_key->p_sz;
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto err;
ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
goto err;
memcpy(ctx->p + (half_key_sz - len), ptr, len);
/* q */
ptr = rsa_key->q;
len = rsa_key->q_sz;
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_p;
ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
if (!ctx->q)
goto free_p;
memcpy(ctx->q + (half_key_sz - len), ptr, len);
/* dp */
ptr = rsa_key->dp;
len = rsa_key->dp_sz;
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_q;
ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
GFP_KERNEL);
if (!ctx->dp)
goto free_q;
memcpy(ctx->dp + (half_key_sz - len), ptr, len);
/* dq */
ptr = rsa_key->dq;
len = rsa_key->dq_sz;
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dp;
ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
GFP_KERNEL);
if (!ctx->dq)
goto free_dp;
memcpy(ctx->dq + (half_key_sz - len), ptr, len);
/* qinv */
ptr = rsa_key->qinv;
len = rsa_key->qinv_sz;
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dq;
ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
GFP_KERNEL);
if (!ctx->qinv)
goto free_dq;
memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
ctx->crt_mode = true;
return;
free_dq:
memset(ctx->dq, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
ctx->dq = NULL;
free_dp:
memset(ctx->dp, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
ctx->dp = NULL;
free_q:
memset(ctx->q, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
ctx->q = NULL;
free_p:
memset(ctx->p, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
ctx->p = NULL;
err:
ctx->crt_mode = false;
}
static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
unsigned int half_key_sz = ctx->key_sz / 2;
/* Free the old key if any */
if (ctx->n)
dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
if (ctx->e)
dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
if (ctx->d) {
memset(ctx->d, '\0', ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
}
if (ctx->p) {
memset(ctx->p, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
}
if (ctx->q) {
memset(ctx->q, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
}
if (ctx->dp) {
memset(ctx->dp, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
}
if (ctx->dq) {
memset(ctx->dq, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
}
if (ctx->qinv) {
memset(ctx->qinv, '\0', half_key_sz);
dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
}
ctx->n = NULL;
ctx->e = NULL;
ctx->d = NULL;
ctx->p = NULL;
ctx->q = NULL;
ctx->dp = NULL;
ctx->dq = NULL;
ctx->qinv = NULL;
ctx->crt_mode = false;
ctx->key_sz = 0;
}
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen, bool private)
{
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
struct rsa_key rsa_key;
int ret;
qat_rsa_clear_ctx(dev, ctx);
if (private)
ret = rsa_parse_priv_key(&rsa_key, key, keylen);
else
ret = rsa_parse_pub_key(&rsa_key, key, keylen);
if (ret < 0)
goto free;
ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
if (ret < 0)
goto free;
ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
if (ret < 0)
goto free;
if (private) {
ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
if (ret < 0)
goto free;
qat_rsa_setkey_crt(ctx, &rsa_key);
}
if (!ctx->n || !ctx->e) {
/* invalid key provided */
ret = -EINVAL;
goto free;
}
if (private && !ctx->d) {
/* invalid private key provided */
ret = -EINVAL;
goto free;
}
return 0;
free:
qat_rsa_clear_ctx(dev, ctx);
return ret;
}
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
return qat_rsa_setkey(tfm, key, keylen, false);
}
static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
return qat_rsa_setkey(tfm, key, keylen, true);
}
static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
return ctx->key_sz;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(numa_node_id());
if (!inst)
return -EINVAL;
akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
ctx->key_sz = 0;
ctx->inst = inst;
return 0;
}
static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
qat_rsa_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
}
static struct akcipher_alg rsa = {
.encrypt = qat_rsa_enc,
.decrypt = qat_rsa_dec,
.set_pub_key = qat_rsa_setpubkey,
.set_priv_key = qat_rsa_setprivkey,
.max_size = qat_rsa_max_size,
.init = qat_rsa_init_tfm,
.exit = qat_rsa_exit_tfm,
.base = {
.cra_name = "rsa",
.cra_driver_name = "qat-rsa",
.cra_priority = 1000,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct qat_rsa_ctx),
},
};
static struct kpp_alg dh = {
.set_secret = qat_dh_set_secret,
.generate_public_key = qat_dh_compute_value,
.compute_shared_secret = qat_dh_compute_value,
.max_size = qat_dh_max_size,
.init = qat_dh_init_tfm,
.exit = qat_dh_exit_tfm,
.base = {
.cra_name = "dh",
.cra_driver_name = "qat-dh",
.cra_priority = 1000,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct qat_dh_ctx),
},
};
int qat_asym_algs_register(void)
{
int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs == 1) {
rsa.base.cra_flags = 0;
ret = crypto_register_akcipher(&rsa);
if (ret)
goto unlock;
ret = crypto_register_kpp(&dh);
}
unlock:
mutex_unlock(&algs_lock);
return ret;
}
void qat_asym_algs_unregister(void)
{
mutex_lock(&algs_lock);
if (--active_devs == 0) {
crypto_unregister_akcipher(&rsa);
crypto_unregister_kpp(&dh);
}
mutex_unlock(&algs_lock);
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_asym_algs.c |
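The file above registers "qat-rsa" and "qat-dh" at priority 1000, so a generic request for "rsa" resolves to the QAT implementation whenever a device is available. A hedged sketch of how an in-kernel consumer might drive the akcipher side through the standard crypto API follows; the function name, key blob and buffers are hypothetical, and the key must be a DER-encoded RSA public key as expected by rsa_parse_pub_key().

/* Hypothetical consumer -- not part of the driver above */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/akcipher.h>

static int example_rsa_encrypt(const void *der_pub_key, unsigned int keylen,
			       void *msg, unsigned int msg_len,
			       void *out, unsigned int out_len)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, keylen);
	if (ret)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	/* msg and out must be DMA-able (e.g. kmalloc'd) buffers */
	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}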
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/bitfield.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_vf_msg.h"
#include "adf_pfvf_vf_proto.h"
/**
* adf_vf2pf_notify_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
* Function sends an init message from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
if (adf_send_vf2pf_msg(accel_dev, msg)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
}
set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
return 0;
}
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
/**
* adf_vf2pf_notify_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
* Function sends a shutdown message from the VF to a PF
*
* Return: void
*/
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
if (adf_send_vf2pf_msg(accel_dev, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
u8 pf_version;
int compat;
int ret;
struct pfvf_message resp;
struct pfvf_message msg = {
.type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
.data = ADF_PFVF_COMPAT_THIS_VERSION,
};
BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Compatibility Version Request.\n");
return ret;
}
pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
/* Response from PF received, check compatibility */
switch (compat) {
case ADF_PF2VF_VF_COMPATIBLE:
break;
case ADF_PF2VF_VF_COMPAT_UNKNOWN:
/* VF is newer than PF - compatible for now */
break;
case ADF_PF2VF_VF_INCOMPATIBLE:
dev_err(&GET_DEV(accel_dev),
"PF (vers %d) and VF (vers %d) are not compatible\n",
pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
return -EINVAL;
default:
dev_err(&GET_DEV(accel_dev),
"Invalid response from PF; assume not compatible\n");
return -EINVAL;
}
accel_dev->vf.pf_compat_ver = pf_version;
return 0;
}
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct capabilities_v3 cap_msg = { 0 };
unsigned int len = sizeof(cap_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
/* The PF is too old to support the extended capabilities */
return 0;
if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
(u8 *)&cap_msg, &len)) {
dev_err(&GET_DEV(accel_dev),
"QAT: Failed to get block message response\n");
return -EFAULT;
}
switch (cap_msg.hdr.version) {
default:
		/* Newer version received, handle only the known parts */
fallthrough;
case ADF_PFVF_CAPABILITIES_V3_VERSION:
if (likely(len >= sizeof(struct capabilities_v3)))
hw_data->clock_frequency = cap_msg.frequency;
else
dev_info(&GET_DEV(accel_dev), "Could not get frequency");
fallthrough;
case ADF_PFVF_CAPABILITIES_V2_VERSION:
if (likely(len >= sizeof(struct capabilities_v2)))
hw_data->accel_capabilities_mask = cap_msg.capabilities;
else
dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
fallthrough;
case ADF_PFVF_CAPABILITIES_V1_VERSION:
if (likely(len >= sizeof(struct capabilities_v1))) {
hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
} else {
dev_err(&GET_DEV(accel_dev),
"Capabilities message truncated to %d bytes\n", len);
return -EFAULT;
}
}
return 0;
}
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
{
struct ring_to_svc_map_v1 rts_map_msg = { 0 };
unsigned int len = sizeof(rts_map_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
/* Use already set default mappings */
return 0;
if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
(u8 *)&rts_map_msg, &len)) {
dev_err(&GET_DEV(accel_dev),
"QAT: Failed to get block message response\n");
return -EFAULT;
}
if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
dev_err(&GET_DEV(accel_dev),
"RING_TO_SVC message truncated to %d bytes\n", len);
return -EFAULT;
}
/* Only v1 at present */
accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
return 0;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c |
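The helpers above are invoked at different points of the VF bring-up path; a hedged sketch of the overall ordering is shown below. The wrapper function is hypothetical -- in the real driver these calls are spread across the VF init/start flow rather than grouped like this -- but the relative order (version handshake first, optional block messages next, init notification last) matches how the messages are meant to be used.

/* Illustrative-only grouping of the VF-to-PF handshake */
static int example_vf_handshake(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Negotiate the PF/VF protocol version first */
	ret = adf_vf2pf_request_version(accel_dev);
	if (ret)
		return ret;

	/* Optional block messages; both return 0 when the PF is too old */
	ret = adf_vf2pf_get_capabilities(accel_dev);
	if (ret)
		return ret;

	ret = adf_vf2pf_get_ring_to_svc(accel_dev);
	if (ret)
		return ret;

	/* Finally tell the PF this VF is up; shutdown mirrors this on teardown */
	return adf_vf2pf_notify_init(accel_dev);
}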
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
return BUILD_RING_BASE_ADDR(addr, size);
}
static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
}
static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
u32 value)
{
WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
}
static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
}
static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
u32 value)
{
WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}
static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
return READ_CSR_E_STAT(csr_base_addr, bank);
}
static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
u32 value)
{
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
dma_addr_t addr)
{
WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}
static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}
static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
{
WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
}
static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}
static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}
static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}
static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
{
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
csr_ops->read_csr_ring_tail = read_csr_ring_tail;
csr_ops->write_csr_ring_tail = write_csr_ring_tail;
csr_ops->read_csr_e_stat = read_csr_e_stat;
csr_ops->write_csr_ring_config = write_csr_ring_config;
csr_ops->write_csr_ring_base = write_csr_ring_base;
csr_ops->write_csr_int_flag = write_csr_int_flag;
csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
csr_ops->write_csr_int_col_en = write_csr_int_col_en;
csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
u32 *lower)
{
*lower = lower_32_bits(value);
*upper = upper_32_bits(value);
}
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
u32 ssm_wdt_pke_high = 0;
u32 ssm_wdt_pke_low = 0;
u32 ssm_wdt_high = 0;
u32 ssm_wdt_low = 0;
/* Convert 64bit WDT timer value into 32bit values for
* mmio write to 32bit CSRs.
*/
adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
&ssm_wdt_pke_low);
/* Enable WDT for sym and dc */
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
/* Enable WDT for pke */
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
static int reset_ring_pair(void __iomem *csr, u32 bank_number)
{
u32 status;
int ret;
	/* Trigger the reset by writing BIT(0) of the rpresetctl register.
	 * The rpresetctl registers have no other RW fields, so there is no
	 * need to preserve the remaining bits; just write directly.
	 */
ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
ADF_WQM_CSR_RPRESETCTL_RESET);
/* Read rpresetsts register and wait for rp reset to complete */
ret = read_poll_timeout(ADF_CSR_RD, status,
status & ADF_WQM_CSR_RPRESETSTS_STATUS,
ADF_RPRESET_POLL_DELAY_US,
ADF_RPRESET_POLL_TIMEOUT_US, true,
csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
if (!ret) {
/* When rp reset is done, clear rpresetsts */
ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
ADF_WQM_CSR_RPRESETSTS_STATUS);
}
return ret;
}
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
void __iomem *csr;
int ret;
if (bank_number >= hw_data->num_banks)
return -EINVAL;
dev_dbg(&GET_DEV(accel_dev),
"ring pair reset for bank:%d\n", bank_number);
csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
ret = reset_ring_pair(csr, bank_number);
if (ret)
dev_err(&GET_DEV(accel_dev),
"ring pair reset failed (timeout)\n");
else
dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c |
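A small worked example of the 64-to-32 bit split performed by adf_gen4_unpack_ssm_wdtimer() before the two CSR writes; the timer value is arbitrary here, not the real ADF_SSM_WDT_* default.

static void example_wdt_split(void)
{
	u64 timer_val = 0x0000000200000001ULL;
	u32 lo = lower_32_bits(timer_val);	/* 0x00000001 -> ADF_SSMWDTL_OFFSET */
	u32 hi = upper_32_bits(timer_val);	/* 0x00000002 -> ADF_SSMWDTH_OFFSET */

	pr_debug("wdt split: high 0x%x low 0x%x\n", hi, lo);
}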
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
static struct workqueue_struct *device_reset_wq;
static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
if (!accel_dev) {
dev_err(&pdev->dev, "Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
if (state == pci_channel_io_perm_failure) {
dev_err(&pdev->dev, "Can't recover from device error\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
/* reset dev data */
struct adf_reset_dev_data {
int mode;
struct adf_accel_dev *accel_dev;
struct completion compl;
struct work_struct reset_work;
};
void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
u16 bridge_ctl = 0;
if (!parent)
parent = pdev;
if (!pci_wait_for_pending_transaction(pdev))
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
msleep(100);
bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
msleep(100);
}
EXPORT_SYMBOL_GPL(adf_reset_sbr);
void adf_reset_flr(struct adf_accel_dev *accel_dev)
{
pcie_flr(accel_to_pci_dev(accel_dev));
}
EXPORT_SYMBOL_GPL(adf_reset_flr);
void adf_dev_restore(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
if (hw_device->reset_device) {
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
hw_device->reset_device(accel_dev);
pci_restore_state(pdev);
pci_save_state(pdev);
}
}
static void adf_device_reset_worker(struct work_struct *work)
{
struct adf_reset_dev_data *reset_data =
container_of(work, struct adf_reset_dev_data, reset_work);
struct adf_accel_dev *accel_dev = reset_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
if (adf_dev_restart(accel_dev)) {
		/* The device hung and we can't restart it, so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
}
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
/* The dev is back alive. Notify the caller if in sync mode */
if (reset_data->mode == ADF_DEV_RESET_SYNC)
complete(&reset_data->compl);
else
kfree(reset_data);
}
static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
enum adf_dev_reset_mode mode)
{
struct adf_reset_dev_data *reset_data;
if (!adf_dev_started(accel_dev) ||
test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
reset_data->accel_dev = accel_dev;
init_completion(&reset_data->compl);
reset_data->mode = mode;
INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
queue_work(device_reset_wq, &reset_data->reset_work);
/* If in sync mode wait for the result */
if (mode == ADF_DEV_RESET_SYNC) {
int ret = 0;
/* Maximum device reset time is 10 seconds */
unsigned long wait_jiffies = msecs_to_jiffies(10000);
unsigned long timeout = wait_for_completion_timeout(
&reset_data->compl, wait_jiffies);
if (!timeout) {
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
ret = -EFAULT;
}
kfree(reset_data);
return ret;
}
return 0;
}
static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_RECOVERED;
}
static void adf_resume(struct pci_dev *pdev)
{
dev_info(&pdev->dev, "Acceleration driver reset completed\n");
dev_info(&pdev->dev, "Device is up and running\n");
}
const struct pci_error_handlers adf_err_handler = {
.error_detected = adf_error_detected,
.slot_reset = adf_slot_reset,
.resume = adf_resume,
};
EXPORT_SYMBOL_GPL(adf_err_handler);
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
WQ_MEM_RECLAIM, 0);
return !device_reset_wq ? -EFAULT : 0;
}
void adf_exit_aer(void)
{
if (device_reset_wq)
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_aer.c |
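The exported adf_err_handler above is meant to be plugged into the per-device PCI driver so that AER events are routed through the reset workqueue. A hedged sketch of that wiring follows; the ID table and probe/remove callbacks are placeholders, and the exact field set varies between the QAT PF drivers.

static struct pci_driver example_qat_driver = {
	.id_table = example_pci_tbl,		/* placeholder ID table */
	.name = "example_qat",
	.probe = example_probe,			/* placeholder callbacks */
	.remove = example_remove,
	.err_handler = &adf_err_handler,	/* hook the AER handlers above */
};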
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw_20_comp.h"
#include "adf_gen4_dc.h"
static void qat_comp_build_deflate(void *ctx)
{
struct icp_qat_fw_comp_req *req_tmpl =
(struct icp_qat_fw_comp_req *)ctx;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
u32 upper_val;
u32 lower_val;
memset(req_tmpl, 0, sizeof(*req_tmpl));
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
QAT_COMN_PTR_TYPE_SGL);
header->serv_specif_flags =
ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
req_pars->req_par_flags =
ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
ICP_QAT_FW_COMP_EOP,
ICP_QAT_FW_COMP_BFINAL,
ICP_QAT_FW_COMP_CNV,
ICP_QAT_FW_COMP_CNV_RECOVERY,
ICP_QAT_FW_COMP_NO_CNV_DFX,
ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
ICP_QAT_FW_COMP_NO_XXHASH_ACC,
ICP_QAT_FW_COMP_CNV_ERROR_NONE,
ICP_QAT_FW_COMP_NO_APPEND_CRC,
ICP_QAT_FW_COMP_NO_DROP_DATA);
/* Fill second half of the template for decompression */
memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
req_tmpl++;
header = &req_tmpl->comn_hdr;
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
cd_pars = &req_tmpl->cd_pars;
hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
}
void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
{
dc_ops->build_deflate_ctx = qat_comp_build_deflate;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c |
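qat_comp_build_deflate() fills two request templates back to back -- a compression template at index 0 and a patched copy for decompression at index 1 -- so the ctx buffer passed in must have room for both. A hedged sketch of a caller sized accordingly is below; the helper name is hypothetical, and in the real driver the buffer is owned by the compression instance setup code.

static struct icp_qat_fw_comp_req *
example_alloc_deflate_templates(struct adf_dc_ops *dc_ops)
{
	struct icp_qat_fw_comp_req *tmpl;

	/* Room for both halves: tmpl[0] = compress, tmpl[1] = decompress */
	tmpl = kzalloc(2 * sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return NULL;

	dc_ops->build_deflate_ctx(tmpl);
	return tmpl;
}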
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pfvf.h"
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
#define ADF_GEN4_VF_MSK 0xFFFF
#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
#define ADF_PFVF_GEN4_MSGDATA_SHIFT 8
#define ADF_PFVF_GEN4_MSGDATA_MASK 0xFFFFFF
static const struct pfvf_csr_format csr_gen4_fmt = {
{ ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
{ ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
};
static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
{
return ADF_4XXX_PF2VM_OFFSET(i);
}
static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
{
return ADF_4XXX_VM2PF_OFFSET(i);
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
u32 val;
val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
}
static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
{
u32 sources, disabled, pending;
/* Get the interrupt sources triggered by VFs */
sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
if (!sources)
return 0;
/* Get the already disabled interrupts */
disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
pending = sources & ~disabled;
if (!pending)
return 0;
	/* Due to HW limitations, when disabling the interrupts, we can't
	 * just disable the requested sources, as this would lead to missed
	 * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
	 * To work around it, disable all and re-enable only the sources that
	 * are not currently pending and were not already disabled. Re-enabling
	 * will trigger a new interrupt for the sources that have changed in
	 * the meantime, if any.
	 */
ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
/* Return the sources of the (new) interrupt(s) */
return pending;
}
static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
struct pfvf_message msg, u32 pfvf_offset,
struct mutex *csr_lock)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 csr_val;
int ret;
csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
if (unlikely(!csr_val))
return -EINVAL;
mutex_lock(csr_lock);
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
/* Wait for confirmation from remote that it received the message */
ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
ADF_PFVF_MSG_ACK_DELAY_US,
ADF_PFVF_MSG_ACK_MAX_DELAY_US,
true, pmisc_addr, pfvf_offset);
if (ret < 0)
dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
mutex_unlock(csr_lock);
return ret;
}
static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
u32 pfvf_offset, u8 compat_ver)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
struct pfvf_message msg = { 0 };
u32 csr_val;
/* Read message from the CSR */
csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
if (!(csr_val & ADF_PFVF_INT)) {
dev_info(&GET_DEV(accel_dev),
"Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
return msg;
}
/* We can now acknowledge the message reception by clearing the
* interrupt bit
*/
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
/* Return the pfvf_message format */
return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
}
void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
pfvf_ops->send_msg = adf_gen4_pfvf_send;
pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c |
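A worked example of the gen4 CSR layout implied by csr_gen4_fmt above, assuming adf_pfvf_csr_msg_of() packs the message type and data with the shifts and masks defined in this file and that bit 0 carries the ADF_PFVF_INT "message pending" flag, which adf_gen4_pfvf_send() ORs in separately.

static u32 example_pack_gen4_msg(void)
{
	u32 type = 0x05, data = 0x1234;
	u32 csr_val;

	csr_val = ((type & ADF_PFVF_GEN4_MSGTYPE_MASK) << ADF_PFVF_GEN4_MSGTYPE_SHIFT) |
		  ((data & ADF_PFVF_GEN4_MSGDATA_MASK) << ADF_PFVF_GEN4_MSGDATA_SHIFT);

	return csr_val;		/* 0x00123414: type in bits 2-7, data in bits 8-31 */
}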
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include <linux/nospec.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#define ADF_MAX_RING_THRESHOLD 80
#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
static inline u32 adf_modulo(u32 data, u32 shift)
{
u32 div = data >> shift;
u32 mult = div << shift;
return data - mult;
}
static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
if (((size - 1) & addr) != 0)
return -EFAULT;
return 0;
}
static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
int i = ADF_MIN_RING_SIZE;
for (; i <= ADF_MAX_RING_SIZE; i++)
if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
return i;
return ADF_DEFAULT_RING_SIZE;
}
static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock(&bank->lock);
if (bank->ring_mask & (1 << ring)) {
spin_unlock(&bank->lock);
return -EFAULT;
}
bank->ring_mask |= (1 << ring);
spin_unlock(&bank->lock);
return 0;
}
static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock(&bank->lock);
bank->ring_mask &= ~(1 << ring);
spin_unlock(&bank->lock);
}
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
spin_lock_bh(&bank->lock);
bank->irq_mask |= (1 << ring);
spin_unlock_bh(&bank->lock);
csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
bank->irq_mask);
csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
bank->irq_coalesc_timer);
}
static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
spin_lock_bh(&bank->lock);
bank->irq_mask &= ~(1 << ring);
spin_unlock_bh(&bank->lock);
csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
bank->irq_mask);
}
bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
{
return atomic_read(ring->inflights) > ring->threshold;
}
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
if (atomic_add_return(1, ring->inflights) >
ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
atomic_dec(ring->inflights);
return -EAGAIN;
}
spin_lock_bh(&ring->lock);
memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
ring->tail = adf_modulo(ring->tail +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
ring->bank->bank_number, ring->ring_number,
ring->tail);
spin_unlock_bh(&ring->lock);
return 0;
}
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 msg_counter = 0;
u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
while (*msg != ADF_RING_EMPTY_SIG) {
ring->callback((u32 *)msg);
atomic_dec(ring->inflights);
*msg = ADF_RING_EMPTY_SIG;
ring->head = adf_modulo(ring->head +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
msg_counter++;
msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
}
if (msg_counter > 0) {
csr_ops->write_csr_ring_head(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number, ring->head);
}
return 0;
}
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
csr_ops->write_csr_ring_config(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number, ring_config);
}
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 ring_config =
BUILD_RESP_RING_CONFIG(ring->ring_size,
ADF_RING_NEAR_WATERMARK_512,
ADF_RING_NEAR_WATERMARK_0);
csr_ops->write_csr_ring_config(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number, ring_config);
}
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
u64 ring_base;
u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
ring_size_bytes, &ring->dma_addr,
GFP_KERNEL);
if (!ring->base_addr)
return -ENOMEM;
memset(ring->base_addr, 0x7F, ring_size_bytes);
/* The base_addr has to be aligned to the size of the buffer */
if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
ring->base_addr, ring->dma_addr);
ring->base_addr = NULL;
return -EFAULT;
}
if (hw_data->tx_rings_mask & (1 << ring->ring_number))
adf_configure_tx_ring(ring);
else
adf_configure_rx_ring(ring);
ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
ring->ring_size);
csr_ops->write_csr_ring_base(ring->bank->csr_addr,
ring->bank->bank_number, ring->ring_number,
ring_base);
spin_lock_init(&ring->lock);
return 0;
}
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
if (ring->base_addr) {
memset(ring->base_addr, 0x7F, ring_size_bytes);
dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
ring_size_bytes, ring->base_addr,
ring->dma_addr);
}
}
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
u32 bank_num, u32 num_msgs,
u32 msg_size, const char *ring_name,
adf_callback_fn callback, int poll_mode,
struct adf_etr_ring_data **ring_ptr)
{
struct adf_etr_data *transport_data = accel_dev->transport;
u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
int max_inflights;
u32 ring_num;
int ret;
if (bank_num >= GET_MAX_BANKS(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
return -EFAULT;
}
if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
return -EFAULT;
}
if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
dev_err(&GET_DEV(accel_dev),
"Invalid ring size for given msg size\n");
return -EFAULT;
}
if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
section, ring_name);
return -EFAULT;
}
if (kstrtouint(val, 10, &ring_num)) {
dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
return -EFAULT;
}
if (ring_num >= num_rings_per_bank) {
dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
return -EFAULT;
}
ring_num = array_index_nospec(ring_num, num_rings_per_bank);
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
ring_num, ring_name);
return -EFAULT;
}
ring = &bank->rings[ring_num];
ring->ring_number = ring_num;
ring->bank = bank;
ring->callback = callback;
ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
ring->head = 0;
ring->tail = 0;
max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
atomic_set(ring->inflights, 0);
ret = adf_init_ring(ring);
if (ret)
goto err;
/* Enable HW arbitration for the given ring */
adf_update_ring_arb(ring);
if (adf_ring_debugfs_add(ring, ring_name)) {
dev_err(&GET_DEV(accel_dev),
"Couldn't add ring debugfs entry\n");
ret = -EFAULT;
goto err;
}
/* Enable interrupts if needed */
if (callback && (!poll_mode))
adf_enable_ring_irq(bank, ring->ring_number);
*ring_ptr = ring;
return 0;
err:
adf_cleanup_ring(ring);
adf_unreserve_ring(bank, ring_num);
adf_update_ring_arb(ring);
return ret;
}
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
/* Disable interrupts for the given ring */
adf_disable_ring_irq(bank, ring->ring_number);
/* Clear PCI config space */
csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
ring->ring_number, 0);
csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
ring->ring_number, 0);
adf_ring_debugfs_rm(ring);
adf_unreserve_ring(bank, ring->ring_number);
/* Disable HW arbitration for the given ring */
adf_update_ring_arb(ring);
adf_cleanup_ring(ring);
}
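/*
 * adf_ring_response_handler() - Service all rings in a bank that have data.
 * The empty-ring status CSR is read, inverted and masked with the rings that
 * have interrupts enabled; each remaining ring is then drained via
 * adf_handle_response().
 */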
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
struct adf_accel_dev *accel_dev = bank->accel_dev;
u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
unsigned long empty_rings;
int i;
empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
bank->bank_number);
empty_rings = ~empty_rings & bank->irq_mask;
for_each_set_bit(i, &empty_rings, num_rings_per_bank)
adf_handle_response(&bank->rings[i]);
}
void adf_response_handler(uintptr_t bank_addr)
{
struct adf_etr_bank_data *bank = (void *)bank_addr;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
/* Handle all the responses and reenable IRQs */
adf_ring_response_handler(bank);
csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
bank->irq_mask);
}
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
const char *section, const char *format,
u32 key, u32 *value)
{
char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
return -EFAULT;
if (kstrtouint(val_buf, 10, value))
return -EFAULT;
return 0;
}
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
const char *section,
u32 bank_num_in_accel)
{
if (adf_get_cfg_int(bank->accel_dev, section,
ADF_ETRMGR_COALESCE_TIMER_FORMAT,
bank_num_in_accel, &bank->irq_coalesc_timer))
bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}
static int adf_init_bank(struct adf_accel_dev *accel_dev,
struct adf_etr_bank_data *bank,
u32 bank_num, void __iomem *csr_addr)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
u32 irq_mask = BIT(num_rings_per_bank) - 1;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
u32 i, coalesc_enabled = 0;
unsigned long ring_mask;
int size;
memset(bank, 0, sizeof(*bank));
bank->bank_number = bank_num;
bank->csr_addr = csr_addr;
bank->accel_dev = accel_dev;
spin_lock_init(&bank->lock);
/* Allocate the rings in the bank */
size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
bank->rings = kzalloc_node(size, GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!bank->rings)
return -ENOMEM;
/* Always enable IRQ coalescing. This allows use of the optimised
 * flag and the coalescing register. If coalescing is disabled in the
 * config file, just use the minimum time value. */
if ((adf_get_cfg_int(accel_dev, "Accelerator0",
ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
&coalesc_enabled) == 0) && coalesc_enabled)
adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
else
bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
for (i = 0; i < num_rings_per_bank; i++) {
csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) {
ring->inflights =
kzalloc_node(sizeof(atomic_t),
GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!ring->inflights)
goto err;
} else {
if (i < hw_data->tx_rx_gap) {
dev_err(&GET_DEV(accel_dev),
"Invalid tx rings mask config\n");
goto err;
}
tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
ring->inflights = tx_ring->inflights;
}
}
if (adf_bank_debugfs_add(bank)) {
dev_err(&GET_DEV(accel_dev),
"Failed to add bank debugfs entry\n");
goto err;
}
csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
return 0;
err:
ring_mask = hw_data->tx_rings_mask;
for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
ring = &bank->rings[i];
kfree(ring->inflights);
ring->inflights = NULL;
}
kfree(bank->rings);
return -ENOMEM;
}
/**
* adf_init_etr_data() - Initialize transport rings for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function initializes the communication channels (rings) for the
* acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr_addr;
u32 size;
u32 num_banks = 0;
int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data)
return -ENOMEM;
num_banks = GET_MAX_BANKS(accel_dev);
size = num_banks * sizeof(struct adf_etr_bank_data);
etr_data->banks = kzalloc_node(size, GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data->banks) {
ret = -ENOMEM;
goto err_bank;
}
accel_dev->transport = etr_data;
i = hw_data->get_etr_bar_id(hw_data);
csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
/* accel_dev->debugfs_dir should always be non-NULL here */
etr_data->debug = debugfs_create_dir("transport",
accel_dev->debugfs_dir);
for (i = 0; i < num_banks; i++) {
ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
csr_addr);
if (ret)
goto err_bank_all;
}
return 0;
err_bank_all:
debugfs_remove(etr_data->debug);
kfree(etr_data->banks);
err_bank:
kfree(etr_data);
accel_dev->transport = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
u32 i;
for (i = 0; i < num_rings_per_bank; i++) {
struct adf_etr_ring_data *ring = &bank->rings[i];
if (bank->ring_mask & (1 << i))
adf_cleanup_ring(ring);
if (hw_data->tx_rings_mask & (1 << i))
kfree(ring->inflights);
}
kfree(bank->rings);
adf_bank_debugfs_rm(bank);
memset(bank, 0, sizeof(*bank));
}
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data = accel_dev->transport;
u32 i, num_banks = GET_MAX_BANKS(accel_dev);
for (i = 0; i < num_banks; i++)
cleanup_bank(&etr_data->banks[i]);
}
/**
* adf_cleanup_etr_data() - Clear transport rings for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function clears the communication channels (rings) of the
* acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data = accel_dev->transport;
if (etr_data) {
adf_cleanup_etr_handles(accel_dev);
debugfs_remove(etr_data->debug);
kfree(etr_data->banks->rings);
kfree(etr_data->banks);
kfree(etr_data);
accel_dev->transport = NULL;
}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_transport.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#include "qat_bl.h"
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
ICP_QAT_HW_CIPHER_NO_CONVERT, \
ICP_QAT_HW_CIPHER_ENCRYPT)
#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
ICP_QAT_HW_CIPHER_NO_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
#define HW_CAP_AES_V2(accel_dev) \
(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
ICP_ACCEL_CAPABILITIES_AES_V2)
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
/* Common content descriptor */
struct qat_alg_cd {
union {
struct qat_enc { /* Encrypt content desc */
struct icp_qat_hw_cipher_algo_blk cipher;
struct icp_qat_hw_auth_algo_blk hash;
} qat_enc_cd;
struct qat_dec { /* Decrypt content desc */
struct icp_qat_hw_auth_algo_blk hash;
struct icp_qat_hw_cipher_algo_blk cipher;
} qat_dec_cd;
};
} __aligned(64);
struct qat_alg_aead_ctx {
struct qat_alg_cd *enc_cd;
struct qat_alg_cd *dec_cd;
dma_addr_t enc_cd_paddr;
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
union {
struct sha1_state sha1;
struct sha256_state sha256;
struct sha512_state sha512;
};
char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
struct icp_qat_hw_cipher_algo_blk *enc_cd;
struct icp_qat_hw_cipher_algo_blk *dec_cd;
dma_addr_t enc_cd_paddr;
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
struct crypto_skcipher *ftfm;
struct crypto_cipher *tweak;
bool fallback;
int mode;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
switch (qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return ICP_QAT_HW_SHA1_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return ICP_QAT_HW_SHA256_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return ICP_QAT_HW_SHA512_STATE1_SZ;
default:
return -EFAULT;
}
}
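/*
 * qat_alg_do_precomputes() - Precompute the HMAC inner and outer states.
 * The authentication key is hashed down if it is longer than the block
 * size, XORed with the standard HMAC ipad/opad values, and one block of
 * each pad is run through the shash. The resulting partial states are
 * exported and stored big-endian in the content descriptor (state1 for the
 * inner hash, state1 plus the rounded state size for the outer hash), so
 * the hardware can resume the HMAC computation per request.
 */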
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
const u8 *auth_key,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
memset(ctx->ipad, 0, block_size);
memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
auth_keylen, ctx->ipad);
if (ret)
return ret;
memcpy(ctx->opad, ctx->ipad, digest_size);
} else {
memcpy(ctx->ipad, auth_key, auth_keylen);
memcpy(ctx->opad, auth_key, auth_keylen);
}
for (i = 0; i < block_size; i++) {
char *ipad_ptr = ctx->ipad + i;
char *opad_ptr = ctx->opad + i;
*ipad_ptr ^= HMAC_IPAD_VALUE;
*opad_ptr ^= HMAC_OPAD_VALUE;
}
if (crypto_shash_init(shash))
return -EFAULT;
if (crypto_shash_update(shash, ctx->ipad, block_size))
return -EFAULT;
hash_state_out = (__be32 *)hash->sha.state1;
hash512_state_out = (__be64 *)hash_state_out;
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
}
if (crypto_shash_init(shash))
return -EFAULT;
if (crypto_shash_update(shash, ctx->opad, block_size))
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
if (offset < 0)
return -EFAULT;
hash_state_out = (__be32 *)(hash->sha.state1 + offset);
hash512_state_out = (__be64 *)hash_state_out;
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
}
memzero_explicit(ctx->ipad, block_size);
memzero_explicit(ctx->opad, block_size);
return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_SGL);
ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_PARTIAL_NONE);
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_PROTO);
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
int alg,
struct crypto_authenc_keys *keys,
int mode)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
struct icp_qat_hw_auth_algo_blk *hash =
(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* CD setup */
cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
/* Request setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
/* Cipher CD config setup */
cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset = 0;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
hash_cd_ctrl->inner_res_sz = digestsize;
hash_cd_ctrl->final_sz = digestsize;
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
hash_cd_ctrl->inner_state1_sz =
round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
hash_cd_ctrl->inner_state2_sz =
round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
break;
default:
break;
}
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
return 0;
}
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
int alg,
struct crypto_authenc_keys *keys,
int mode)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
struct icp_qat_hw_cipher_algo_blk *cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
struct icp_qat_fw_la_auth_req_params *auth_param =
(struct icp_qat_fw_la_auth_req_params *)
((char *)&req_tmpl->serv_specif_rqpars +
sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup */
cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg,
digestsize);
hash->sha.inner_setup.auth_counter.counter =
cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
/* Request setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_CMP_AUTH_RES);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
/* Cipher CD config setup */
cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset =
(sizeof(struct icp_qat_hw_auth_setup) +
roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = 0;
hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
hash_cd_ctrl->inner_res_sz = digestsize;
hash_cd_ctrl->final_sz = digestsize;
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
hash_cd_ctrl->inner_state1_sz =
round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
hash_cd_ctrl->inner_state2_sz =
round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
break;
default:
break;
}
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
auth_param->auth_res_sz = digestsize;
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
return 0;
}
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req,
struct icp_qat_hw_cipher_algo_blk *cd,
const u8 *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
int mode = ctx->mode;
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
cd_pars->u.s.content_desc_params_sz =
sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
/* Store both XTS keys in the CD; only the first key is sent
 * to the HW, the second key is used for tweak calculation.
 */
memcpy(cd->ucs_aes.key, key, keylen);
keylen = keylen / 2;
} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
memcpy(cd->ucs_aes.key, key, keylen);
keylen = round_up(keylen, 16);
} else {
memcpy(cd->aes.key, key, keylen);
}
/* Cipher CD config setup */
cd_ctrl->cipher_key_sz = keylen >> 3;
cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cd_ctrl->cipher_cfg_offset = 0;
ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
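/*
 * qat_alg_xts_reverse_key() - Derive the tail of the AES key schedule.
 * The forward key is expanded in software and the last round key (AES-128)
 * or the last two round keys (AES-256) are copied out. This is used for XTS
 * decryption on AES-v2 capable devices, where the hardware key convert
 * (reversal) is not supported and the reversed key must be supplied in the
 * content descriptor.
 */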
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
u8 *key_reverse)
{
struct crypto_aes_ctx aes_expanded;
int nrounds;
u8 *key;
aes_expandkey(&aes_expanded, key_forward, keylen);
if (keylen == AES_KEYSIZE_128) {
nrounds = 10;
key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
memcpy(key_reverse, key, AES_BLOCK_SIZE);
} else {
/* AES_KEYSIZE_256 */
nrounds = 14;
key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
memcpy(key_reverse, key, AES_BLOCK_SIZE);
memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
}
}
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
/* Key reversing not supported, set no convert */
dec_cd->aes.cipher_config.val =
QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
/* In-place key reversal */
qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
dec_cd->ucs_aes.key);
} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
dec_cd->aes.cipher_config.val =
QAT_AES_HW_CONFIG_DEC(alg, mode);
} else {
dec_cd->aes.cipher_config.val =
QAT_AES_HW_CONFIG_ENC(alg, mode);
}
}
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
switch (key_len) {
case AES_KEYSIZE_128:
*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
break;
case AES_KEYSIZE_192:
*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
break;
case AES_KEYSIZE_256:
*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
break;
default:
return -EINVAL;
}
} else {
switch (key_len) {
case AES_KEYSIZE_128 << 1:
*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
break;
case AES_KEYSIZE_256 << 1:
*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen, int mode)
{
struct crypto_authenc_keys keys;
int alg;
if (crypto_authenc_extractkeys(&keys, key, keylen))
goto bad_key;
if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
goto bad_key;
if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
goto error;
if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
goto error;
memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
memzero_explicit(&keys, sizeof(keys));
return -EFAULT;
}
static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
const u8 *key,
unsigned int keylen,
int mode)
{
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
return -EINVAL;
qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
return qat_alg_aead_init_sessions(tfm, key, keylen,
ICP_QAT_HW_CIPHER_CBC_MODE);
}
static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = NULL;
int node = numa_node_id();
struct device *dev;
int ret;
inst = qat_crypto_get_instance_node(node);
if (!inst)
return -EINVAL;
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
ret = -ENOMEM;
goto out_free_inst;
}
ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
ret = -ENOMEM;
goto out_free_enc;
}
ret = qat_alg_aead_init_sessions(tfm, key, keylen,
ICP_QAT_HW_CIPHER_CBC_MODE);
if (ret)
goto out_free_all;
return 0;
out_free_all:
memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
out_free_inst:
ctx->inst = NULL;
qat_crypto_put_instance(inst);
return ret;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
if (ctx->enc_cd)
return qat_alg_aead_rekey(tfm, key, keylen);
else
return qat_alg_aead_newkey(tfm, key, keylen);
}
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_crypto_request *qat_req)
{
struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct aead_request *areq = qat_req->aead_req;
u8 stat_field = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EBADMSG;
aead_request_complete(areq, res);
}
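/*
 * qat_alg_update_iv_ctr_mode() - Advance the CTR mode counter block.
 * The 128-bit big-endian IV is handled as two 64-bit halves: the low half
 * is incremented by the number of AES blocks covered by the request
 * (DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE)) and a wrap of the low half
 * carries into the high half. For example, a 4096 byte request advances
 * the counter by 256.
 */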
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
struct skcipher_request *sreq = qat_req->skcipher_req;
u64 iv_lo_prev;
u64 iv_lo;
u64 iv_hi;
memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
iv_lo = be64_to_cpu(qat_req->iv_lo);
iv_hi = be64_to_cpu(qat_req->iv_hi);
iv_lo_prev = iv_lo;
iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
if (iv_lo < iv_lo_prev)
iv_hi++;
qat_req->iv_lo = cpu_to_be64(iv_lo);
qat_req->iv_hi = cpu_to_be64(iv_hi);
}
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
struct skcipher_request *sreq = qat_req->skcipher_req;
int offset = sreq->cryptlen - AES_BLOCK_SIZE;
struct scatterlist *sgl;
if (qat_req->encryption)
sgl = sreq->dst;
else
sgl = sreq->src;
scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}
static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
switch (ctx->mode) {
case ICP_QAT_HW_CIPHER_CTR_MODE:
qat_alg_update_iv_ctr_mode(qat_req);
break;
case ICP_QAT_HW_CIPHER_CBC_MODE:
qat_alg_update_iv_cbc_mode(qat_req);
break;
case ICP_QAT_HW_CIPHER_XTS_MODE:
break;
default:
dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
ctx->mode);
}
}
static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_crypto_request *qat_req)
{
struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct skcipher_request *sreq = qat_req->skcipher_req;
u8 stat_field = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
if (qat_req->encryption)
qat_alg_update_iv(qat_req);
memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
skcipher_request_complete(sreq, res);
}
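/*
 * qat_alg_callback() - Response ring callback for symmetric crypto.
 * The originating request is recovered from the opaque data carried in the
 * firmware response, its per-request callback (AEAD or skcipher) is invoked,
 * and any backlogged requests for the instance are resubmitted.
 */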
void qat_alg_callback(void *resp)
{
struct icp_qat_fw_la_resp *qat_resp = resp;
struct qat_crypto_request *qat_req =
(void *)(__force long)qat_resp->opaque_data;
struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_req->cb(qat_resp, qat_req);
qat_alg_send_backlog(backlog);
}
static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
struct qat_crypto_instance *inst,
struct crypto_async_request *base)
{
struct qat_alg_req *alg_req = &qat_req->alg_req;
alg_req->fw_req = (u32 *)&qat_req->req;
alg_req->tx_ring = inst->sym_tx;
alg_req->base = base;
alg_req->backlog = &inst->backlog;
return qat_alg_send_message(alg_req);
}
static int qat_alg_aead_dec(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
gfp_t f = qat_algs_alloc_flags(&areq->base);
int ret;
u32 cipher_len;
cipher_len = areq->cryptlen - digst_size;
if (cipher_len % AES_BLOCK_SIZE != 0)
return -EINVAL;
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
&qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = cipher_len;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
if (ret == -ENOSPC)
qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
static int qat_alg_aead_enc(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
gfp_t f = qat_algs_alloc_flags(&areq->base);
struct icp_qat_fw_la_bulk_req *msg;
u8 *iv = areq->iv;
int ret;
if (areq->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
&qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
cipher_param->cipher_length = areq->cryptlen;
cipher_param->cipher_offset = areq->assoclen;
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + areq->cryptlen;
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
if (ret == -ENOSPC)
qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
const u8 *key, unsigned int keylen,
int mode)
{
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
const u8 *key, unsigned int keylen,
int mode)
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
int node = numa_node_id();
int ret;
inst = qat_crypto_get_instance_node(node);
if (!inst)
return -EINVAL;
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
ret = -ENOMEM;
goto out_free_instance;
}
ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
ret = -ENOMEM;
goto out_free_enc;
}
ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
if (ret)
goto out_free_all;
return 0;
out_free_all:
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
dma_free_coherent(dev, sizeof(*ctx->dec_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
dma_free_coherent(dev, sizeof(*ctx->enc_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
out_free_instance:
ctx->inst = NULL;
qat_crypto_put_instance(inst);
return ret;
}
static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen,
int mode)
{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->mode = mode;
if (ctx->enc_cd)
return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
else
return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_CBC_MODE);
}
static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_CTR_MODE);
}
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
if (keylen >> 1 == AES_KEYSIZE_192) {
ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
if (ret)
return ret;
ctx->fallback = true;
return 0;
}
ctx->fallback = false;
ret = qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_XTS_MODE);
if (ret)
return ret;
if (HW_CAP_AES_V2(ctx->inst->accel_dev))
ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
keylen / 2);
return ret;
}
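/*
 * qat_alg_set_req_iv() - Populate the per-request IV field.
 * On AES-v2 capable devices in XTS mode the tweak is pre-computed here by
 * encrypting the IV with the second XTS key (ctx->tweak); otherwise the IV
 * is copied into the request verbatim.
 */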
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
u8 *iv = qat_req->skcipher_req->iv;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
crypto_cipher_encrypt_one(ctx->tweak,
(u8 *)cipher_param->u.cipher_IV_array,
iv);
else
memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}
static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
gfp_t f = qat_algs_alloc_flags(&req->base);
struct icp_qat_fw_la_bulk_req *msg;
int ret;
if (req->cryptlen == 0)
return 0;
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
&qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
qat_req->encryption = true;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
qat_alg_set_req_iv(qat_req);
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
return qat_alg_skcipher_encrypt(req);
}
static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
struct skcipher_request *nreq = skcipher_request_ctx(req);
if (req->cryptlen < XTS_BLOCK_SIZE)
return -EINVAL;
if (ctx->fallback) {
memcpy(nreq, req, sizeof(*req));
skcipher_request_set_tfm(nreq, ctx->ftfm);
return crypto_skcipher_encrypt(nreq);
}
return qat_alg_skcipher_encrypt(req);
}
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
gfp_t f = qat_algs_alloc_flags(&req->base);
struct icp_qat_fw_la_bulk_req *msg;
int ret;
if (req->cryptlen == 0)
return 0;
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
&qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
qat_req->encryption = false;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
qat_alg_set_req_iv(qat_req);
qat_alg_update_iv(qat_req);
ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
return ret;
}
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
struct skcipher_request *nreq = skcipher_request_ctx(req);
if (req->cryptlen < XTS_BLOCK_SIZE)
return -EINVAL;
if (ctx->fallback) {
memcpy(nreq, req, sizeof(*req));
skcipher_request_set_tfm(nreq, ctx->ftfm);
return crypto_skcipher_decrypt(nreq);
}
return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
crypto_free_shash(ctx->hash_tfm);
if (!inst)
return;
dev = &GET_DEV(inst->accel_dev);
if (ctx->enc_cd) {
memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
}
if (ctx->dec_cd) {
memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
}
qat_crypto_put_instance(inst);
}
static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
int reqsize;
ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->ftfm))
return PTR_ERR(ctx->ftfm);
ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->tweak)) {
crypto_free_skcipher(ctx->ftfm);
return PTR_ERR(ctx->tweak);
}
reqsize = max(sizeof(struct qat_crypto_request),
sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(ctx->ftfm));
crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
if (!inst)
return;
dev = &GET_DEV(inst->accel_dev);
if (ctx->enc_cd) {
memset(ctx->enc_cd, 0,
sizeof(struct icp_qat_hw_cipher_algo_blk));
dma_free_coherent(dev,
sizeof(struct icp_qat_hw_cipher_algo_blk),
ctx->enc_cd, ctx->enc_cd_paddr);
}
if (ctx->dec_cd) {
memset(ctx->dec_cd, 0,
sizeof(struct icp_qat_hw_cipher_algo_blk));
dma_free_coherent(dev,
sizeof(struct icp_qat_hw_cipher_algo_blk),
ctx->dec_cd, ctx->dec_cd_paddr);
}
qat_crypto_put_instance(inst);
}
static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->ftfm)
crypto_free_skcipher(ctx->ftfm);
if (ctx->tweak)
crypto_free_cipher(ctx->tweak);
qat_alg_skcipher_exit_tfm(tfm);
}
static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
},
.init = qat_alg_aead_sha1_init,
.exit = qat_alg_aead_exit,
.setkey = qat_alg_aead_setkey,
.decrypt = qat_alg_aead_dec,
.encrypt = qat_alg_aead_enc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
},
.init = qat_alg_aead_sha256_init,
.exit = qat_alg_aead_exit,
.setkey = qat_alg_aead_setkey,
.decrypt = qat_alg_aead_dec,
.encrypt = qat_alg_aead_enc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
},
.init = qat_alg_aead_sha512_init,
.exit = qat_alg_aead_exit,
.setkey = qat_alg_aead_setkey,
.decrypt = qat_alg_aead_dec,
.encrypt = qat_alg_aead_enc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
} };
static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
.base.cra_priority = 4001,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = qat_alg_skcipher_init_tfm,
.exit = qat_alg_skcipher_exit_tfm,
.setkey = qat_alg_skcipher_cbc_setkey,
.decrypt = qat_alg_skcipher_blk_decrypt,
.encrypt = qat_alg_skcipher_blk_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}, {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
.base.cra_priority = 4001,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = qat_alg_skcipher_init_tfm,
.exit = qat_alg_skcipher_exit_tfm,
.setkey = qat_alg_skcipher_ctr_setkey,
.decrypt = qat_alg_skcipher_decrypt,
.encrypt = qat_alg_skcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}, {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
.base.cra_priority = 4001,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = qat_alg_skcipher_init_xts_tfm,
.exit = qat_alg_skcipher_exit_xts_tfm,
.setkey = qat_alg_skcipher_xts_setkey,
.decrypt = qat_alg_skcipher_xts_decrypt,
.encrypt = qat_alg_skcipher_xts_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
} };
int qat_algs_register(void)
{
int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs != 1)
goto unlock;
ret = crypto_register_skciphers(qat_skciphers,
ARRAY_SIZE(qat_skciphers));
if (ret)
goto unlock;
ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
if (ret)
goto unreg_algs;
unlock:
mutex_unlock(&algs_lock);
return ret;
unreg_algs:
crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
goto unlock;
}
void qat_algs_unregister(void)
{
mutex_lock(&algs_lock);
if (--active_devs != 0)
goto unlock;
crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
unlock:
mutex_unlock(&algs_lock);
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_algs.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
ADF_CSR_WR(csr_addr, (arb_offset) + \
(ADF_ARB_REG_SIZE * (index)), value)
#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
(ADF_ARB_REG_SIZE * (index)), value)
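/*
 * adf_init_arb() - Program the service arbiters for a device.
 * Each of the ADF_ARB_NUM arbiters is written with the device-specific
 * configuration word, then every enabled acceleration engine's worker
 * threads are mapped to arbiters using the table returned by
 * hw_data->get_arb_mapping().
 */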
int adf_init_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
unsigned long ae_mask = hw_data->ae_mask;
u32 arb_off, wt_off, arb_cfg;
const u32 *thd_2_arb_cfg;
struct arb_info info;
int arb, i;
hw_data->get_arb_info(&info);
arb_cfg = info.arb_cfg;
arb_off = info.arb_offset;
wt_off = info.wt2sam_offset;
/* Service arbiters are configured for 32-byte responses and
 * ring flow control checking is enabled. */
for (arb = 0; arb < ADF_ARB_NUM; arb++)
WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
/* Map worker threads to service arbiters */
thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
for_each_set_bit(i, &ae_mask, hw_data->num_engines)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
return 0;
}
EXPORT_SYMBOL_GPL(adf_init_arb);
void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
u32 tx_ring_mask = hw_data->tx_rings_mask;
u32 shift = hw_data->tx_rx_gap;
u32 arben, arben_tx, arben_rx;
u32 rx_ring_mask;
/*
* Enable arbitration on a ring only if the TX half of the ring mask
* matches the RX part. This results in writes to CSR on both TX and
* RX update - only one is necessary, but both are done for
* simplicity.
*/
rx_ring_mask = tx_ring_mask << shift;
arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
arben = arben_tx & arben_rx;
csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
ring->bank->bank_number, arben);
}
void adf_exit_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
u32 arb_off, wt_off;
struct arb_info info;
void __iomem *csr;
unsigned int i;
hw_data->get_arb_info(&info);
arb_off = info.arb_offset;
wt_off = info.wt2sam_offset;
if (!accel_dev->transport)
return;
csr = accel_dev->transport->banks[0].csr_addr;
hw_data->get_arb_info(&info);
/* Reset arbiter configuration */
for (i = 0; i < ADF_ARB_NUM; i++)
WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
/* Unmap worker threads from service arbiters */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
EXPORT_SYMBOL_GPL(adf_exit_arb);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_fw_counters.h"
#define ADF_FW_COUNTERS_MAX_PADDING 16
enum adf_fw_counters_types {
ADF_FW_REQUESTS,
ADF_FW_RESPONSES,
ADF_FW_COUNTERS_COUNT
};
static const char * const adf_fw_counter_names[] = {
[ADF_FW_REQUESTS] = "Requests",
[ADF_FW_RESPONSES] = "Responses",
};
static_assert(ARRAY_SIZE(adf_fw_counter_names) == ADF_FW_COUNTERS_COUNT);
struct adf_ae_counters {
u16 ae;
u64 values[ADF_FW_COUNTERS_COUNT];
};
struct adf_fw_counters {
u16 ae_count;
struct adf_ae_counters ae_counters[];
};
static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
u64 req_count, u64 resp_count)
{
ae_counters->ae = ae;
ae_counters->values[ADF_FW_REQUESTS] = req_count;
ae_counters->values[ADF_FW_RESPONSES] = resp_count;
}
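/*
 * adf_fw_counters_load_from_device() - Read per-AE firmware counters.
 * Walks every non-admin acceleration engine, queries its request and
 * response counts via adf_get_ae_fw_counters() and stores them in the
 * pre-allocated table. Fails if the table is smaller than the number of
 * engines reported by the AE mask.
 */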
static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev,
struct adf_fw_counters *fw_counters)
{
struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
unsigned long ae_mask;
unsigned int i;
unsigned long ae;
/* Ignore the admin AEs */
ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
if (hweight_long(ae_mask) > fw_counters->ae_count)
return -EINVAL;
i = 0;
for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
u64 req_count, resp_count;
int ret;
ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count);
if (ret)
return ret;
adf_fw_counters_parse_ae_values(&fw_counters->ae_counters[i++], ae,
req_count, resp_count);
}
return 0;
}
static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count)
{
struct adf_fw_counters *fw_counters;
if (unlikely(!ae_count))
return ERR_PTR(-EINVAL);
fw_counters = kmalloc(struct_size(fw_counters, ae_counters, ae_count), GFP_KERNEL);
if (!fw_counters)
return ERR_PTR(-ENOMEM);
fw_counters->ae_count = ae_count;
return fw_counters;
}
/**
* adf_fw_counters_get() - Return FW counters for the provided device.
* @accel_dev: Pointer to a QAT acceleration device
*
* Allocates and returns a table of counters containing execution statistics
* for each non-admin AE available through the supplied acceleration device.
* The caller becomes the owner of such memory and is responsible for
* the deallocation through a call to kfree().
*
 * Return: a pointer to a dynamically allocated struct adf_fw_counters
 *         on success, or an ERR_PTR() encoded error value on failure.
*/
static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
struct adf_fw_counters *fw_counters;
unsigned long ae_count;
int ret;
if (!adf_dev_started(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
return ERR_PTR(-EFAULT);
}
/* Ignore the admin AEs */
ae_count = hweight_long(hw_data->ae_mask & ~hw_data->admin_ae_mask);
fw_counters = adf_fw_counters_allocate(ae_count);
if (IS_ERR(fw_counters))
return fw_counters;
ret = adf_fw_counters_load_from_device(accel_dev, fw_counters);
if (ret) {
kfree(fw_counters);
dev_err(&GET_DEV(accel_dev),
"Failed to create QAT fw_counters file table [%d].\n", ret);
return ERR_PTR(ret);
}
return fw_counters;
}
static void *qat_fw_counters_seq_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_fw_counters *fw_counters = sfile->private;
if (*pos == 0)
return SEQ_START_TOKEN;
if (*pos > fw_counters->ae_count)
return NULL;
return &fw_counters->ae_counters[*pos - 1];
}
static void *qat_fw_counters_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_fw_counters *fw_counters = sfile->private;
(*pos)++;
if (*pos > fw_counters->ae_count)
return NULL;
return &fw_counters->ae_counters[*pos - 1];
}
static void qat_fw_counters_seq_stop(struct seq_file *sfile, void *v) {}
static int qat_fw_counters_seq_show(struct seq_file *sfile, void *v)
{
int i;
if (v == SEQ_START_TOKEN) {
seq_puts(sfile, "AE ");
for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
seq_printf(sfile, " %*s", ADF_FW_COUNTERS_MAX_PADDING,
adf_fw_counter_names[i]);
} else {
struct adf_ae_counters *ae_counters = (struct adf_ae_counters *)v;
seq_printf(sfile, "%2d:", ae_counters->ae);
for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
seq_printf(sfile, " %*llu", ADF_FW_COUNTERS_MAX_PADDING,
ae_counters->values[i]);
}
seq_putc(sfile, '\n');
return 0;
}
static const struct seq_operations qat_fw_counters_sops = {
.start = qat_fw_counters_seq_start,
.next = qat_fw_counters_seq_next,
.stop = qat_fw_counters_seq_stop,
.show = qat_fw_counters_seq_show,
};
static int qat_fw_counters_file_open(struct inode *inode, struct file *file)
{
struct adf_accel_dev *accel_dev = inode->i_private;
struct seq_file *fw_counters_seq_file;
struct adf_fw_counters *fw_counters;
int ret;
fw_counters = adf_fw_counters_get(accel_dev);
if (IS_ERR(fw_counters))
return PTR_ERR(fw_counters);
ret = seq_open(file, &qat_fw_counters_sops);
if (unlikely(ret)) {
kfree(fw_counters);
return ret;
}
fw_counters_seq_file = file->private_data;
fw_counters_seq_file->private = fw_counters;
return ret;
}
static int qat_fw_counters_file_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
kfree(seq->private);
seq->private = NULL;
	return seq_release(inode, file);
}
static const struct file_operations qat_fw_counters_fops = {
.owner = THIS_MODULE,
.open = qat_fw_counters_file_open,
.read = seq_read,
.llseek = seq_lseek,
.release = qat_fw_counters_file_release,
};
/**
* adf_fw_counters_dbgfs_add() - Create a debugfs file containing FW
* execution counters.
* @accel_dev: Pointer to a QAT acceleration device
*
* Function creates a file to display a table with statistics for the given
* QAT acceleration device. The table stores device specific execution values
* for each AE, such as the number of requests sent to the FW and responses
* received from the FW.
*
* Return: void
*/
void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev)
{
accel_dev->fw_cntr_dbgfile = debugfs_create_file("fw_counters", 0400,
accel_dev->debugfs_dir,
accel_dev,
&qat_fw_counters_fops);
}
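/*
 * Example of the debugfs table produced by the seq_file handlers above
 * (counter values are made up and column spacing is approximate; the width
 * comes from ADF_FW_COUNTERS_MAX_PADDING):
 *
 * AE           Requests        Responses
 *  0:              1024             1024
 *  1:               512              510
 */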
/**
* adf_fw_counters_dbgfs_rm() - Remove the debugfs file containing FW counters.
* @accel_dev: Pointer to a QAT acceleration device.
*
* Function removes the file providing the table of statistics for the given
* QAT acceleration device.
*
* Return: void
*/
void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
debugfs_remove(accel_dev->fw_cntr_dbgfile);
accel_dev->fw_cntr_dbgfile = NULL;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_fw_counters.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"
#define ADF_VF2PF_RATELIMIT_INTERVAL 8
#define ADF_VF2PF_RATELIMIT_BURST 130
static struct workqueue_struct *pf2vf_resp_wq;
struct adf_pf2vf_resp {
struct work_struct pf2vf_resp_work;
struct adf_accel_vf_info *vf_info;
};
static void adf_iov_send_resp(struct work_struct *work)
{
struct adf_pf2vf_resp *pf2vf_resp =
container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
struct adf_accel_dev *accel_dev = vf_info->accel_dev;
u32 vf_nr = vf_info->vf_nr;
bool ret;
ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
if (ret)
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
kfree(pf2vf_resp);
}
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
struct adf_pf2vf_resp *pf2vf_resp;
pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
if (!pf2vf_resp)
return;
pf2vf_resp->vf_info = vf_info;
INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}
static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
int totalvfs = pci_sriov_get_totalvfs(pdev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_accel_vf_info *vf_info;
int i;
for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
i++, vf_info++) {
		/* This ptr will be populated when VFs are created */
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
vf_info->vf_compat_ver = 0;
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
ADF_VF2PF_RATELIMIT_INTERVAL,
ADF_VF2PF_RATELIMIT_BURST);
}
/* Set Valid bits in AE Thread to PCIe Function Mapping */
if (hw_data->configure_iov_threads)
hw_data->configure_iov_threads(accel_dev, true);
/* Enable VF to PF interrupts for all VFs */
adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
/*
* Due to the hardware design, when SR-IOV and the ring arbiter
* are enabled all the VFs supported in hardware must be enabled in
* order for all the hardware resources (i.e. bundles) to be usable.
* When SR-IOV is enabled, each of the VFs will own one bundle.
*/
return pci_enable_sriov(pdev, totalvfs);
}
/**
* adf_disable_sriov() - Disable SRIOV for the device
* @accel_dev: Pointer to accel device.
*
* Function disables SRIOV for the accel device.
*
 * Return: void
*/
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
struct adf_accel_vf_info *vf;
int i;
if (!accel_dev->pf.vf_info)
return;
adf_pf2vf_notify_restarting(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
adf_disable_all_vf2pf_interrupts(accel_dev);
/* Clear Valid bits in AE Thread to PCIe Function Mapping */
if (hw_data->configure_iov_threads)
hw_data->configure_iov_threads(accel_dev, false);
for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
mutex_destroy(&vf->pf2vf_lock);
kfree(accel_dev->pf.vf_info);
accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
/**
* adf_sriov_configure() - Enable SRIOV for the device
* @pdev: Pointer to PCI device.
* @numvfs: Number of virtual functions (VFs) to enable.
*
* Note that the @numvfs parameter is ignored and all VFs supported by the
* device are enabled due to the design of the hardware.
*
* Function enables SRIOV for the PCI device.
*
* Return: number of VFs enabled on success, error code otherwise.
*/
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
int totalvfs = pci_sriov_get_totalvfs(pdev);
unsigned long val;
int ret;
if (!accel_dev) {
dev_err(&pdev->dev, "Failed to find accel_dev\n");
return -EFAULT;
}
if (!device_iommu_mapped(&pdev->dev))
dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
if (accel_dev->pf.vf_info) {
dev_info(&pdev->dev, "Already enabled for this device\n");
return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
if (adf_devmgr_in_reset(accel_dev) ||
adf_dev_in_use(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Device busy\n");
return -EBUSY;
}
ret = adf_dev_down(accel_dev, true);
if (ret)
return ret;
}
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
return -EFAULT;
val = 0;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
ADF_NUM_CY, (void *)&val, ADF_DEC))
return -EFAULT;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
&val, ADF_DEC);
if (ret)
return ret;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
/* Allocate memory for VF info structs */
accel_dev->pf.vf_info = kcalloc(totalvfs,
sizeof(struct adf_accel_vf_info),
GFP_KERNEL);
if (!accel_dev->pf.vf_info)
return -ENOMEM;
if (adf_dev_up(accel_dev, false)) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
accel_dev->accel_id);
return -EFAULT;
}
ret = adf_enable_sriov(accel_dev);
if (ret)
return ret;
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
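/*
 * adf_sriov_configure() is wired into struct pci_driver as the
 * .sriov_configure callback, so the PCI core invokes it when the PF's
 * sriov_numvfs sysfs attribute is written; as noted in the kernel-doc above,
 * the requested count is ignored and all VFs supported by the hardware are
 * enabled.
 */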
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
void adf_exit_pf_wq(void)
{
if (pf2vf_resp_wq) {
destroy_workqueue(pf2vf_resp_wq);
pf2vf_resp_wq = NULL;
}
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_sriov.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_utils.h"
#include "adf_pfvf_vf_msg.h"
#include "adf_pfvf_vf_proto.h"
#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
#define ADF_PFVF_MSG_ACK_DELAY 2
#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
/* How often to retry if there is no response */
#define ADF_PFVF_MSG_RESP_RETRIES 5
#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
ADF_PFVF_MSG_ACK_MAX_RETRY + \
ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
/**
* adf_send_vf2pf_msg() - send VF to PF message
* @accel_dev: Pointer to acceleration device
* @msg: Message to send
*
* This function allows the VF to send a message to the PF.
*
* Return: 0 on success, error code otherwise.
*/
int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
{
struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
&accel_dev->vf.vf2pf_lock);
}
/**
* adf_recv_pf2vf_msg() - receive a PF to VF message
* @accel_dev: Pointer to acceleration device
*
* This function allows the VF to receive a message from the PF.
*
 * Return: a valid message on success, a zeroed message otherwise.
*/
static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
{
struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
}
/**
* adf_send_vf2pf_req() - send VF2PF request message
* @accel_dev: Pointer to acceleration device.
* @msg: Request message to send
* @resp: Returned PF response
*
* This function sends a message that requires a response from the VF to the PF
* and waits for a reply.
*
* Return: 0 on success, error code otherwise.
*/
int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
struct pfvf_message *resp)
{
unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
int ret;
reinit_completion(&accel_dev->vf.msg_received);
/* Send request from VF to PF */
do {
ret = adf_send_vf2pf_msg(accel_dev, msg);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to send request msg to PF\n");
return ret;
}
/* Wait for response, if it times out retry */
ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
timeout);
if (ret) {
if (likely(resp))
*resp = accel_dev->vf.response;
/* Once copied, set to an invalid value */
accel_dev->vf.response.type = 0;
return 0;
}
dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
} while (--retries);
return -EIO;
}
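/*
 * Illustrative sketch (not part of the upstream driver): a minimal caller of
 * adf_send_vf2pf_req(). The request type and data are passed in by this
 * hypothetical helper; real users such as the version and capability queries
 * in adf_pfvf_vf_msg.c build the message from constants in adf_pfvf_msg.h.
 */
static int __maybe_unused adf_vf2pf_req_example(struct adf_accel_dev *accel_dev,
						u8 req_type, u32 req_data)
{
	struct pfvf_message req = { .type = req_type, .data = req_data };
	struct pfvf_message resp = { 0 };
	int ret;
	/* Blocks until the PF replies or ADF_PFVF_MSG_RESP_RETRIES expire */
	ret = adf_send_vf2pf_req(accel_dev, req, &resp);
	if (ret)
		return ret;
	/* resp.type and resp.data now hold the PF response */
	return 0;
}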
static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
u8 *type, u8 *data)
{
struct pfvf_message req = { 0 };
struct pfvf_message resp = { 0 };
u8 blk_type;
u8 blk_byte;
u8 msg_type;
u8 max_data;
int err;
/* Convert the block type to {small, medium, large} size category */
if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
} else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
*type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
} else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
*type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
} else {
dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
return -EINVAL;
}
/* Sanity check */
if (*data > max_data) {
dev_err(&GET_DEV(accel_dev),
"Invalid byte %s %u for message type %u\n",
crc ? "count" : "index", *data, *type);
return -EINVAL;
}
/* Build the block message */
req.type = msg_type;
req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
err = adf_send_vf2pf_req(accel_dev, req, &resp);
if (err)
return err;
*type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
*data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
return 0;
}
static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
u8 index, u8 *data)
{
int ret;
ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
if (ret < 0)
return ret;
if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
dev_err(&GET_DEV(accel_dev),
"Unexpected BLKMSG response type %u, byte 0x%x\n",
type, index);
return -EFAULT;
}
*data = index;
return 0;
}
static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
u8 bytes, u8 *crc)
{
int ret;
	/* The byte count refers to a length; shift it to a 0-based count to
	 * avoid overflowing the byte field. As a side effect, a request for
	 * 0 bytes is technically valid.
*/
--bytes;
ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
if (ret < 0)
return ret;
if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
dev_err(&GET_DEV(accel_dev),
"Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
type, bytes);
return -EFAULT;
}
*crc = bytes;
return 0;
}
/**
* adf_send_vf2pf_blkmsg_req() - retrieve block message
* @accel_dev: Pointer to acceleration VF device.
* @type: The block message type, see adf_pfvf_msg.h for allowed values
* @buffer: input buffer where to place the received data
* @buffer_len: buffer length as input, the amount of written bytes on output
*
* Request a message of type 'type' over the block message transport.
* This function will send the required amount block message requests and
* return the overall content back to the caller through the provided buffer.
* The buffer should be large enough to contain the requested message type,
* otherwise the response will be truncated.
*
* Return: 0 on success, error code otherwise.
*/
int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
u8 *buffer, unsigned int *buffer_len)
{
unsigned int index;
unsigned int msg_len;
int ret;
u8 remote_crc;
u8 local_crc;
if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
type);
return -EINVAL;
}
if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
dev_err(&GET_DEV(accel_dev),
"Buffer size too small for a block message\n");
return -EINVAL;
}
ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
ADF_PFVF_BLKMSG_VER_BYTE,
&buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
if (unlikely(ret))
return ret;
if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
dev_err(&GET_DEV(accel_dev),
"Invalid version 0 received for block request %u", type);
return -EFAULT;
}
ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
ADF_PFVF_BLKMSG_LEN_BYTE,
&buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
if (unlikely(ret))
return ret;
if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
dev_err(&GET_DEV(accel_dev),
"Invalid size 0 received for block request %u", type);
return -EFAULT;
}
/* We need to pick the minimum since there is no way to request a
* specific version. As a consequence any scenario is possible:
* - PF has a newer (longer) version which doesn't fit in the buffer
* - VF expects a newer (longer) version, so we must not ask for
* bytes in excess
* - PF and VF share the same version, no problem
*/
msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
msg_len = min(*buffer_len, msg_len);
/* Get the payload */
for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
&buffer[index]);
if (unlikely(ret))
return ret;
}
ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
if (unlikely(ret))
return ret;
local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
if (unlikely(local_crc != remote_crc)) {
dev_err(&GET_DEV(accel_dev),
"CRC error on msg type %d. Local %02X, remote %02X\n",
type, local_crc, remote_crc);
return -EIO;
}
*buffer_len = msg_len;
return 0;
}
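/*
 * Illustrative sketch (not part of the upstream driver): fetching a block
 * message into a local buffer. The buffer size and the block type are
 * placeholders; real callers size the buffer for the structure they expect
 * and pass a type defined in adf_pfvf_msg.h.
 */
static int __maybe_unused adf_vf2pf_blkmsg_example(struct adf_accel_dev *accel_dev,
						   u8 blk_type)
{
	u8 buffer[64] = { 0 };	/* placeholder size, >= ADF_PFVF_BLKMSG_HEADER_SIZE */
	unsigned int len = sizeof(buffer);
	int ret;
	ret = adf_send_vf2pf_blkmsg_req(accel_dev, blk_type, buffer, &len);
	if (ret)
		return ret;
	/* buffer[] now holds version, length and payload; len is the total */
	return 0;
}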
static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
struct pfvf_message msg)
{
switch (msg.type) {
case ADF_PF2VF_MSGTYPE_RESTARTING:
dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
adf_pf2vf_handle_pf_restarting(accel_dev);
return false;
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
dev_dbg(&GET_DEV(accel_dev),
"Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
msg.type, msg.data);
accel_dev->vf.response = msg;
complete(&accel_dev->vf.msg_received);
return true;
default:
dev_err(&GET_DEV(accel_dev),
"Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
msg.type, msg.data);
}
return false;
}
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
{
struct pfvf_message msg;
msg = adf_recv_pf2vf_msg(accel_dev);
	if (msg.type) /* A valid message was received */
		return adf_handle_pf2vf_msg(accel_dev, msg);
	/* Invalid or no message; no replies for PF->VF messages at present */
return true;
}
/**
* adf_enable_vf2pf_comms() - Function enables communication from vf to pf
*
* @accel_dev: Pointer to acceleration device virtual function.
*
* Return: 0 on success, error code otherwise.
*/
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
int ret;
adf_pfvf_crc_init();
adf_enable_pf2vf_interrupts(accel_dev);
ret = adf_vf2pf_request_version(accel_dev);
if (ret)
return ret;
ret = adf_vf2pf_get_capabilities(accel_dev);
if (ret)
return ret;
ret = adf_vf2pf_get_ring_to_svc(accel_dev);
return ret;
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2021 Intel Corporation */
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen2_pfvf.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_vf_proto.h"
#include "adf_pfvf_utils.h"
/* VF2PF interrupts */
#define ADF_GEN2_VF_MSK 0xFFFF
#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
#define ADF_GEN2_PF_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_GEN2_VF_PF2VF_OFFSET 0x200
#define ADF_GEN2_CSR_IN_USE 0x6AC2
#define ADF_GEN2_CSR_IN_USE_MASK 0xFFFE
enum gen2_csr_pos {
ADF_GEN2_CSR_PF2VF_OFFSET = 0,
ADF_GEN2_CSR_VF2PF_OFFSET = 16,
};
#define ADF_PFVF_GEN2_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN2_MSGTYPE_MASK 0x0F
#define ADF_PFVF_GEN2_MSGDATA_SHIFT 6
#define ADF_PFVF_GEN2_MSGDATA_MASK 0x3FF
static const struct pfvf_csr_format csr_gen2_fmt = {
{ ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
{ ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
};
#define ADF_PFVF_MSG_RETRY_DELAY 5
#define ADF_PFVF_MSG_MAX_RETRIES 3
static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
{
return ADF_GEN2_PF_PF2VF_OFFSET(i);
}
static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
{
return ADF_GEN2_VF_PF2VF_OFFSET;
}
static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
if (vf_mask & ADF_GEN2_VF_MSK) {
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
& ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
}
}
static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
| ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
}
static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
{
u32 sources, disabled, pending;
u32 errsou3, errmsk3;
/* Get the interrupt sources triggered by VFs */
errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
if (!sources)
return 0;
/* Get the already disabled interrupts */
errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
pending = sources & ~disabled;
if (!pending)
return 0;
/* Due to HW limitations, when disabling the interrupts, we can't
* just disable the requested sources, as this would lead to missed
* interrupts if ERRSOU3 changes just before writing to ERRMSK3.
	 * To work around it, disable all and re-enable only the sources that
	 * are not reported in ERRSOU3 and were not already disabled.
	 * Re-enabling will trigger a new interrupt for the sources that have
	 * changed in the meantime, if any.
*/
errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
/* Return the sources of the (new) interrupt(s) */
return pending;
}
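/*
 * Worked example (per-VF bits after ADF_GEN2_ERR_REG_VF2PF extraction, values
 * are illustrative): if ERRSOU3 reports sources 0b0101 (VF0 and VF2) and
 * ERRMSK3 already masks 0b0001 (VF0), then pending = 0b0100 (VF2). ERRMSK3 is
 * first written to mask all 16 VFs, then rewritten to mask only
 * sources | disabled = 0b0101, so every other VF is re-enabled and only the
 * newly pending VF2 is returned to the caller.
 */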
static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
{
return ADF_PFVF_INT << offset;
}
static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
{
return (csr_msg & 0xFFFF) << offset;
}
static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
{
return (csr_val >> offset) & 0xFFFF;
}
static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
{
return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
}
static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
{
*msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
}
static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
{
*msg |= (ADF_GEN2_CSR_IN_USE << offset);
}
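/*
 * Illustrative sketch (not part of the upstream driver): how the helpers
 * above compose for the VF->PF direction. The 16-bit wire message goes into
 * the VF2PF half of the shared CSR, the in-use pattern is stamped into the
 * remote (PF2VF) half and the local interrupt bit is set, exactly as
 * adf_gen2_pfvf_send() does further down.
 */
static u32 __maybe_unused gen2_csr_encode_vf2pf_example(u32 wire_msg)
{
	u32 csr = gen2_csr_msg_to_position(wire_msg, ADF_GEN2_CSR_VF2PF_OFFSET);
	gen2_csr_set_in_use(&csr, ADF_GEN2_CSR_PF2VF_OFFSET);
	return csr | gen2_csr_get_int_bit(ADF_GEN2_CSR_VF2PF_OFFSET);
}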
static bool is_legacy_user_pfvf_message(u32 msg)
{
return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
}
static bool is_pf2vf_notification(u8 msg_type)
{
switch (msg_type) {
case ADF_PF2VF_MSGTYPE_RESTARTING:
return true;
default:
return false;
}
}
static bool is_vf2pf_notification(u8 msg_type)
{
switch (msg_type) {
case ADF_VF2PF_MSGTYPE_INIT:
case ADF_VF2PF_MSGTYPE_SHUTDOWN:
return true;
default:
return false;
}
}
struct pfvf_gen2_params {
u32 pfvf_offset;
struct mutex *csr_lock; /* lock preventing concurrent access of CSR */
enum gen2_csr_pos local_offset;
enum gen2_csr_pos remote_offset;
bool (*is_notification_message)(u8 msg_type);
u8 compat_ver;
};
static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
struct pfvf_gen2_params *params)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
enum gen2_csr_pos remote_offset = params->remote_offset;
enum gen2_csr_pos local_offset = params->local_offset;
unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
struct mutex *lock = params->csr_lock;
u32 pfvf_offset = params->pfvf_offset;
u32 int_bit;
u32 csr_val;
u32 csr_msg;
int ret;
/* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
	 * allows us to build and read messages as if they were all 0 based.
* However, send and receive are in a single shared 32 bits register,
* so we need to shift and/or mask the message half before decoding
* it and after encoding it. Which one to shift depends on the
* direction.
*/
int_bit = gen2_csr_get_int_bit(local_offset);
csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
if (unlikely(!csr_msg))
return -EINVAL;
/* Prepare for CSR format, shifting the wire message in place and
* setting the in use pattern
*/
csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
gen2_csr_set_in_use(&csr_msg, remote_offset);
mutex_lock(lock);
start:
/* Check if the PFVF CSR is in use by remote function */
csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
if (gen2_csr_is_in_use(csr_val, local_offset)) {
dev_dbg(&GET_DEV(accel_dev),
"PFVF CSR in use by remote function\n");
goto retry;
}
/* Attempt to get ownership of the PFVF CSR */
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
/* Wait for confirmation from remote func it received the message */
ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
ADF_PFVF_MSG_ACK_DELAY_US,
ADF_PFVF_MSG_ACK_MAX_DELAY_US,
true, pmisc_addr, pfvf_offset);
if (unlikely(ret < 0)) {
dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
csr_val &= ~int_bit;
}
/* For fire-and-forget notifications, the receiver does not clear
* the in-use pattern. This is used to detect collisions.
*/
if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
/* Collision must have overwritten the message */
dev_err(&GET_DEV(accel_dev),
"Collision on notification - PFVF CSR overwritten by remote function\n");
goto retry;
}
/* If the far side did not clear the in-use pattern it is either
* 1) Notification - message left intact to detect collision
* 2) Older protocol (compatibility version < 3) on the far side
* where the sender is responsible for clearing the in-use
	 *    pattern after the receiver has acknowledged receipt.
* In either case, clear the in-use pattern now.
*/
if (gen2_csr_is_in_use(csr_val, remote_offset)) {
gen2_csr_clear_in_use(&csr_val, remote_offset);
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
}
out:
mutex_unlock(lock);
return ret;
retry:
if (--retries) {
msleep(ADF_PFVF_MSG_RETRY_DELAY);
goto start;
} else {
ret = -EBUSY;
goto out;
}
}
static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
struct pfvf_gen2_params *params)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
enum gen2_csr_pos remote_offset = params->remote_offset;
enum gen2_csr_pos local_offset = params->local_offset;
u32 pfvf_offset = params->pfvf_offset;
struct pfvf_message msg = { 0 };
u32 int_bit;
u32 csr_val;
u16 csr_msg;
int_bit = gen2_csr_get_int_bit(local_offset);
/* Read message */
csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
if (!(csr_val & int_bit)) {
dev_info(&GET_DEV(accel_dev),
"Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
return msg;
}
/* Extract the message from the CSR */
csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
/* Ignore legacy non-system (non-kernel) messages */
if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
dev_dbg(&GET_DEV(accel_dev),
"Ignored non-system message (0x%.8x);\n", csr_val);
/* Because this must be a legacy message, the far side
* must clear the in-use pattern, so don't do it.
*/
return msg;
}
/* Return the pfvf_message format */
msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
/* The in-use pattern is not cleared for notifications (so that
* it can be used for collision detection) or older implementations
*/
if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
!params->is_notification_message(msg.type))
gen2_csr_clear_in_use(&csr_val, remote_offset);
/* To ACK, clear the INT bit */
csr_val &= ~int_bit;
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
return msg;
}
static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
u32 pfvf_offset, struct mutex *csr_lock)
{
struct pfvf_gen2_params params = {
.csr_lock = csr_lock,
.pfvf_offset = pfvf_offset,
.local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
.remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
.is_notification_message = is_pf2vf_notification,
};
return adf_gen2_pfvf_send(accel_dev, msg, ¶ms);
}
static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
u32 pfvf_offset, struct mutex *csr_lock)
{
struct pfvf_gen2_params params = {
.csr_lock = csr_lock,
.pfvf_offset = pfvf_offset,
.local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
.remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
.is_notification_message = is_vf2pf_notification,
};
return adf_gen2_pfvf_send(accel_dev, msg, ¶ms);
}
static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
u32 pfvf_offset, u8 compat_ver)
{
struct pfvf_gen2_params params = {
.pfvf_offset = pfvf_offset,
.local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
.remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
.is_notification_message = is_pf2vf_notification,
.compat_ver = compat_ver,
};
return adf_gen2_pfvf_recv(accel_dev, ¶ms);
}
static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
u32 pfvf_offset, u8 compat_ver)
{
struct pfvf_gen2_params params = {
.pfvf_offset = pfvf_offset,
.local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
.remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
.is_notification_message = is_vf2pf_notification,
.compat_ver = compat_ver,
};
return adf_gen2_pfvf_recv(accel_dev, ¶ms);
}
void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
pfvf_ops->send_msg = adf_gen2_pf2vf_send;
pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
pfvf_ops->send_msg = adf_gen2_vf2pf_send;
pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_gen2_dc.h"
#include "icp_qat_fw_comp.h"
static void qat_comp_build_deflate_ctx(void *ctx)
{
struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
memset(req_tmpl, 0, sizeof(*req_tmpl));
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
QAT_COMN_PTR_TYPE_SGL);
header->serv_specif_flags =
ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
cd_pars->u.sl.comp_slice_cfg_word[0] =
ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
ICP_QAT_HW_COMPRESSION_DEPTH_1,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
req_pars->req_par_flags =
ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
ICP_QAT_FW_COMP_EOP,
ICP_QAT_FW_COMP_BFINAL,
ICP_QAT_FW_COMP_CNV,
ICP_QAT_FW_COMP_CNV_RECOVERY,
ICP_QAT_FW_COMP_NO_CNV_DFX,
ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
ICP_QAT_FW_COMP_NO_XXHASH_ACC,
ICP_QAT_FW_COMP_CNV_ERROR_NONE,
ICP_QAT_FW_COMP_NO_APPEND_CRC,
ICP_QAT_FW_COMP_NO_DROP_DATA);
ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
/* Fill second half of the template for decompression */
memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
req_tmpl++;
header = &req_tmpl->comn_hdr;
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
cd_pars = &req_tmpl->cd_pars;
cd_pars->u.sl.comp_slice_cfg_word[0] =
ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
ICP_QAT_HW_COMPRESSION_DEPTH_1,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
}
void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
{
dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
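/*
 * Illustrative sketch (not part of the upstream driver): the template builder
 * above fills two consecutive requests - index 0 for compression, index 1 for
 * decompression - so callers in the compression service code hand it memory
 * for two icp_qat_fw_comp_req structures. The helper name is hypothetical.
 */
static void __maybe_unused qat_dc_build_templates_example(struct adf_dc_ops *dc_ops,
							   struct icp_qat_fw_comp_req *reqs)
{
	/* 'reqs' must point to at least two request templates */
	dc_ops->build_deflate_ctx(reqs);
}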
| linux-master | drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_msg.h"
#include "adf_pfvf_pf_proto.h"
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
struct adf_accel_vf_info *vf;
struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
}
}
int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
u8 *buffer, u8 compat)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct capabilities_v2 caps_msg;
caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
caps_msg.capabilities = hw_data->accel_capabilities_mask;
caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
caps_msg.hdr.payload_size =
ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
memcpy(buffer, &caps_msg, sizeof(caps_msg));
return 0;
}
int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
u8 *buffer, u8 compat)
{
struct ring_to_svc_map_v1 rts_map_msg;
rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
return 0;
}
| linux-master | drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
struct qat_request_buffs *buf)
{
struct device *dev = &GET_DEV(accel_dev);
struct qat_alg_buf_list *bl = buf->bl;
struct qat_alg_buf_list *blout = buf->blout;
dma_addr_t blp = buf->blp;
dma_addr_t blpout = buf->bloutp;
size_t sz = buf->sz;
size_t sz_out = buf->sz_out;
int bl_dma_dir;
int i;
bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->buffers[i].addr,
bl->buffers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
if (!buf->sgl_src_valid)
kfree(bl);
if (blp != blpout) {
for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->buffers[i].addr,
blout->buffers[i].len,
DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
if (!buf->sgl_dst_valid)
kfree(blout);
}
}
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
struct scatterlist *sgl,
struct scatterlist *sglout,
struct qat_request_buffs *buf,
dma_addr_t extra_dst_buff,
size_t sz_extra_dst_buff,
unsigned int sskip,
unsigned int dskip,
gfp_t flags)
{
struct device *dev = &GET_DEV(accel_dev);
int i, sg_nctr = 0;
int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, buffers, n);
int node = dev_to_node(&GET_DEV(accel_dev));
unsigned int left;
int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
buf->sgl_src_valid = false;
buf->sgl_dst_valid = false;
if (n > QAT_MAX_BUFF_DESC) {
bufl = kzalloc_node(sz, flags, node);
if (unlikely(!bufl))
return -ENOMEM;
} else {
bufl = &buf->sgl_src.sgl_hdr;
memset(bufl, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_src_valid = true;
}
bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for (i = 0; i < n; i++)
bufl->buffers[i].addr = DMA_MAPPING_ERROR;
left = sskip;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
if (!sg->length)
continue;
if (left >= sg->length) {
left -= sg->length;
continue;
}
bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
bufl_dma_dir);
bufl->buffers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
goto err_in;
sg_nctr++;
if (left) {
bufl->buffers[y].len -= left;
left = 0;
}
}
bufl->num_bufs = sg_nctr;
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, blp)))
goto err_in;
buf->bl = bufl;
buf->blp = blp;
buf->sz = sz;
/* Handle out of place operation */
if (sgl != sglout) {
struct qat_alg_buf *buffers;
int extra_buff = extra_dst_buff ? 1 : 0;
int n_sglout = sg_nents(sglout);
n = n_sglout + extra_buff;
sz_out = struct_size(buflout, buffers, n);
left = dskip;
sg_nctr = 0;
if (n > QAT_MAX_BUFF_DESC) {
buflout = kzalloc_node(sz_out, flags, node);
if (unlikely(!buflout))
goto err_in;
} else {
buflout = &buf->sgl_dst.sgl_hdr;
memset(buflout, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_dst_valid = true;
}
buffers = buflout->buffers;
for (i = 0; i < n; i++)
buffers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sglout, sg, n_sglout, i) {
int y = sg_nctr;
if (!sg->length)
continue;
if (left >= sg->length) {
left -= sg->length;
continue;
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
buffers[y].len = sg->length;
sg_nctr++;
if (left) {
buffers[y].len -= left;
left = 0;
}
}
if (extra_buff) {
buffers[sg_nctr].addr = extra_dst_buff;
buffers[sg_nctr].len = sz_extra_dst_buff;
}
buflout->num_bufs = sg_nctr;
buflout->num_bufs += extra_buff;
buflout->num_mapped_bufs = sg_nctr;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err_out;
buf->blout = buflout;
buf->bloutp = bloutp;
buf->sz_out = sz_out;
} else {
/* Otherwise set the src and dst to the same address */
buf->bloutp = buf->blp;
buf->sz_out = 0;
}
return 0;
err_out:
if (!dma_mapping_error(dev, bloutp))
dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
n = sg_nents(sglout);
for (i = 0; i < n; i++) {
if (buflout->buffers[i].addr == extra_dst_buff)
break;
if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
DMA_FROM_DEVICE);
}
if (!buf->sgl_dst_valid)
kfree(buflout);
err_in:
if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->buffers[i].addr))
dma_unmap_single(dev, bufl->buffers[i].addr,
bufl->buffers[i].len,
bufl_dma_dir);
if (!buf->sgl_src_valid)
kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
}
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
struct scatterlist *sgl,
struct scatterlist *sglout,
struct qat_request_buffs *buf,
struct qat_sgl_to_bufl_params *params,
gfp_t flags)
{
dma_addr_t extra_dst_buff = 0;
size_t sz_extra_dst_buff = 0;
unsigned int sskip = 0;
unsigned int dskip = 0;
if (params) {
extra_dst_buff = params->extra_dst_buff;
sz_extra_dst_buff = params->sz_extra_dst_buff;
sskip = params->sskip;
dskip = params->dskip;
}
return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
extra_dst_buff, sz_extra_dst_buff,
sskip, dskip, flags);
}
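/*
 * Illustrative sketch (not part of the upstream driver): a minimal user of
 * the mapping helpers above. Real callers in the crypto and compression
 * paths fill qat_sgl_to_bufl_params with skip offsets and an optional extra
 * destination buffer; here the defaults are used and the helper name is
 * hypothetical.
 */
static int __maybe_unused qat_bl_map_example(struct adf_accel_dev *accel_dev,
					     struct scatterlist *src,
					     struct scatterlist *dst,
					     struct qat_request_buffs *buf)
{
	struct qat_sgl_to_bufl_params params = { 0 };
	int ret;
	ret = qat_bl_sgl_to_bufl(accel_dev, src, dst, buf, &params, GFP_KERNEL);
	if (ret)
		return ret;
	/* buf->blp and buf->bloutp are now DMA addresses of the descriptors */
	qat_bl_free_bufl(accel_dev, buf);
	return 0;
}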
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
struct qat_alg_buf_list *bl)
{
struct device *dev = &GET_DEV(accel_dev);
int n = bl->num_bufs;
int i;
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bl->buffers[i].addr))
dma_unmap_single(dev, bl->buffers[i].addr,
bl->buffers[i].len, DMA_FROM_DEVICE);
}
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
struct scatterlist *sgl,
struct qat_alg_buf_list **bl)
{
struct device *dev = &GET_DEV(accel_dev);
struct qat_alg_buf_list *bufl;
int node = dev_to_node(dev);
struct scatterlist *sg;
int n, i, sg_nctr;
size_t sz;
n = sg_nents(sgl);
sz = struct_size(bufl, buffers, n);
bufl = kzalloc_node(sz, GFP_KERNEL, node);
if (unlikely(!bufl))
return -ENOMEM;
for (i = 0; i < n; i++)
bufl->buffers[i].addr = DMA_MAPPING_ERROR;
sg_nctr = 0;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
if (!sg->length)
continue;
bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
DMA_FROM_DEVICE);
bufl->buffers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
goto err_map;
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
bufl->num_mapped_bufs = sg_nctr;
*bl = bufl;
return 0;
err_map:
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->buffers[i].addr))
dma_unmap_single(dev, bufl->buffers[i].addr,
bufl->buffers[i].len,
DMA_FROM_DEVICE);
kfree(bufl);
*bl = NULL;
return -ENOMEM;
}
static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
struct scatterlist *sgl,
struct qat_alg_buf_list *bl,
bool free_bl)
{
if (bl) {
qat_bl_sgl_unmap(accel_dev, bl);
if (free_bl)
kfree(bl);
}
if (sgl)
sgl_free(sgl);
}
static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
struct scatterlist **sgl,
struct qat_alg_buf_list **bl,
unsigned int dlen,
gfp_t gfp)
{
struct scatterlist *dst;
int ret;
dst = sgl_alloc(dlen, gfp, NULL);
if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
return -ENOMEM;
}
ret = qat_bl_sgl_map(accel_dev, dst, bl);
if (ret)
goto err;
*sgl = dst;
return 0;
err:
sgl_free(dst);
*sgl = NULL;
return ret;
}
int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
struct scatterlist **sg,
unsigned int dlen,
struct qat_request_buffs *qat_bufs,
gfp_t gfp)
{
struct device *dev = &GET_DEV(accel_dev);
dma_addr_t new_blp = DMA_MAPPING_ERROR;
struct qat_alg_buf_list *new_bl;
struct scatterlist *new_sg;
size_t new_bl_size;
int ret;
ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
if (ret)
return ret;
new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
/* Map new firmware SGL descriptor */
new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, new_blp)))
goto err;
/* Unmap old firmware SGL descriptor */
dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
/* Free and unmap old scatterlist */
qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
!qat_bufs->sgl_dst_valid);
qat_bufs->sgl_dst_valid = false;
qat_bufs->blout = new_bl;
qat_bufs->bloutp = new_blp;
qat_bufs->sz_out = new_bl_size;
*sg = new_sg;
return 0;
err:
qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
if (!dma_mapping_error(dev, new_blp))
dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
return -ENOMEM;
}
| linux-master | drivers/crypto/intel/qat/qat_common/qat_bl.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2021 Intel Corporation */
#include <linux/crc8.h>
#include <linux/pci.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_utils.h"
/* CRC Calculation */
DECLARE_CRC8_TABLE(pfvf_crc8_table);
#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
void adf_pfvf_crc_init(void)
{
crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
}
u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
{
return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
}
static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
u32 value, const struct pfvf_field_format *fmt)
{
if (unlikely((value & fmt->mask) != value)) {
dev_err(&GET_DEV(accel_dev),
"PFVF message value 0x%X out of range, %u max allowed\n",
value, fmt->mask);
return false;
}
*csr_msg |= value << fmt->offset;
return true;
}
u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
const struct pfvf_csr_format *fmt)
{
u32 csr_msg = 0;
if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
!set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
return 0;
return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
}
struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
const struct pfvf_csr_format *fmt)
{
struct pfvf_message msg = { 0 };
msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
if (unlikely(!msg.type))
dev_err(&GET_DEV(accel_dev),
"Invalid PFVF msg with no type received\n");
return msg;
}
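/*
 * Illustrative sketch (not part of the upstream driver): packing a message
 * into a CSR word and back using a made-up field layout. The real layouts are
 * supplied by the generation-specific code (for example csr_gen2_fmt in
 * adf_gen2_pfvf.c); both the function name and the offsets/masks below are
 * hypothetical.
 */
static void __maybe_unused adf_pfvf_utils_roundtrip_example(struct adf_accel_dev *accel_dev)
{
	static const struct pfvf_csr_format example_fmt = {
		.type = { .offset = 2, .mask = 0x0F },	/* hypothetical layout */
		.data = { .offset = 6, .mask = 0x3FF },	/* hypothetical layout */
	};
	struct pfvf_message msg = { .type = 1, .data = 42 };
	struct pfvf_message back;
	u32 csr;
	csr = adf_pfvf_csr_msg_of(accel_dev, msg, &example_fmt);
	back = adf_pfvf_message_of(accel_dev, csr, &example_fmt);
	dev_dbg(&GET_DEV(accel_dev), "roundtrip: type %u data %u (csr 0x%x)\n",
		back.type, back.data, csr);
}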
| linux-master | drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
.instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
u32 straps = self->straps;
u32 fuses = self->fuses;
u32 accel;
accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
accel &= ADF_C62X_ACCELERATORS_MASK;
return accel;
}
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
u32 straps = self->straps;
u32 fuses = self->fuses;
unsigned long disabled;
u32 ae_disable;
int accel;
/* If an accel is disabled, then disable the corresponding two AEs */
disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
ae_disable = BIT(1) | BIT(0);
for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
straps |= ae_disable << (accel << 1);
return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}
static u32 get_ts_clock(struct adf_hw_device_data *self)
{
/*
* Timestamp update interval is 16 AE clock ticks for c62x.
*/
return self->clock_frequency / 16;
}
static int measure_clock(struct adf_accel_dev *accel_dev)
{
u32 frequency;
int ret;
ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ,
ADF_C62X_MAX_AE_FREQ);
if (ret)
return ret;
accel_dev->hw_device->clock_frequency = frequency;
return 0;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_ETR_BAR;
}
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_SRAM_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int aes = self->get_num_aes(self);
if (aes == 8)
return DEV_SKU_2;
else if (aes == 10)
return DEV_SKU_4;
return DEV_SKU_UNKNOWN;
}
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
return thrd_to_arb_map;
}
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
}
void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c62x_class;
hw_data->instance_id = c62x_class.instances++;
hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
hw_data->get_num_accels = adf_gen2_get_num_accels;
hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_admin_info = adf_gen2_get_admin_info;
hw_data->get_arb_info = adf_gen2_get_arb_info;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_C62X_FW;
hw_data->fw_mmp_name = ADF_C62X_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->configure_iov_threads = configure_iov_threads;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_gen2_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
hw_data->measure_clock = measure_clock;
hw_data->get_hb_clock = get_ts_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
}
| linux-master | drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C62X_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case PCI_DEVICE_ID_INTEL_QAT_C62X:
adf_clean_hw_data_c62x(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
switch (ent->device) {
case PCI_DEVICE_ID_INTEL_QAT_C62X:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/* If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transactions will be very slow.
		 */
dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
	/* Add accel device to accel table.
	 * This should be called before adf_cleanup_accel is called
	 */
if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c62x(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
&hw_data->fuses);
pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
&hw_data->straps);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found\n");
ret = -EFAULT;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Get accelerator capabilities mask */
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
/* Find and map all the device's BARS */
i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
goto out_err_free_reg;
}
adf_dbgfs_init(accel_dev);
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_C62X_FW);
MODULE_FIRMWARE(ADF_C62X_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
| linux-master | drivers/crypto/intel/qat/qat_c62x/adf_drv.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
#include "adf_dh895xccvf_hw_data.h"
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
.type = DEV_DH895XCCVF,
.instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_DH895XCCIOV_ACCELERATORS_MASK;
}
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
return ADF_DH895XCCIOV_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
return ADF_DH895XCCIOV_MAX_ACCELERATORS;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
return ADF_DH895XCCIOV_MAX_ACCELENGINES;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCCIOV_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCCIOV_ETR_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
return DEV_SKU_VF;
}
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
}
static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcciov_class;
hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->dev_class->instances++;
hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
adf_devmgr_update_class_index(hw_data);
}
| linux-master | drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_dbgfs.h>
#include "adf_dh895xccvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_DH895XCCVF_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
struct adf_accel_dev *pf;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
switch (ent->device) {
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
accel_dev->is_vf = true;
pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table */
if (adf_devmgr_add_dev(accel_dev, pf)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
adf_dbgfs_init(accel_dev);
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
adf_flush_vf_wq(accel_dev);
adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
adf_clean_vf_map(true);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
| linux-master | drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
#include "adf_c3xxxvf_hw_data.h"
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
.instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_ACCELERATORS_MASK;
}
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_ACCELENGINES_MASK;
}
static u32 get_num_accels(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_MAX_ACCELERATORS;
}
static u32 get_num_aes(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_MAX_ACCELENGINES;
}
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_PMISC_BAR;
}
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXXIOV_ETR_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
return DEV_SKU_VF;
}
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
}
static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c3xxxiov_class;
hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
hw_data->dev_class->instances++;
hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class->instances--;
adf_devmgr_update_class_index(hw_data);
}
| linux-master | drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c |
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_dbgfs.h>
#include "adf_c3xxxvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C3XXXVF_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
struct adf_accel_dev *pf;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
switch (ent->device) {
case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
accel_dev->is_vf = true;
pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table */
if (adf_devmgr_add_dev(accel_dev, pf)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
adf_dbgfs_init(accel_dev);
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
adf_flush_vf_wq(accel_dev);
adf_dev_down(accel_dev, false);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
adf_clean_vf_map(true);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
| linux-master | drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Keem Bay OCS HCU Crypto Driver.
*
* Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <crypto/sha2.h>
#include "ocs-hcu.h"
/* Registers. */
#define OCS_HCU_MODE 0x00
#define OCS_HCU_CHAIN 0x04
#define OCS_HCU_OPERATION 0x08
#define OCS_HCU_KEY_0 0x0C
#define OCS_HCU_ISR 0x50
#define OCS_HCU_IER 0x54
#define OCS_HCU_STATUS 0x58
#define OCS_HCU_MSG_LEN_LO 0x60
#define OCS_HCU_MSG_LEN_HI 0x64
#define OCS_HCU_KEY_BYTE_ORDER_CFG 0x80
#define OCS_HCU_DMA_SRC_ADDR 0x400
#define OCS_HCU_DMA_SRC_SIZE 0x408
#define OCS_HCU_DMA_DST_SIZE 0x40C
#define OCS_HCU_DMA_DMA_MODE 0x410
#define OCS_HCU_DMA_NEXT_SRC_DESCR 0x418
#define OCS_HCU_DMA_MSI_ISR 0x480
#define OCS_HCU_DMA_MSI_IER 0x484
#define OCS_HCU_DMA_MSI_MASK 0x488
/* Register bit definitions. */
#define HCU_MODE_ALGO_SHIFT 16
#define HCU_MODE_HMAC_SHIFT 22
#define HCU_STATUS_BUSY BIT(0)
#define HCU_BYTE_ORDER_SWAP BIT(0)
#define HCU_IRQ_HASH_DONE BIT(2)
#define HCU_IRQ_HASH_ERR_MASK (BIT(3) | BIT(1) | BIT(0))
#define HCU_DMA_IRQ_SRC_DONE BIT(0)
#define HCU_DMA_IRQ_SAI_ERR BIT(2)
#define HCU_DMA_IRQ_BAD_COMP_ERR BIT(3)
#define HCU_DMA_IRQ_INBUF_RD_ERR BIT(4)
#define HCU_DMA_IRQ_INBUF_WD_ERR BIT(5)
#define HCU_DMA_IRQ_OUTBUF_WR_ERR BIT(6)
#define HCU_DMA_IRQ_OUTBUF_RD_ERR BIT(7)
#define HCU_DMA_IRQ_CRD_ERR BIT(8)
#define HCU_DMA_IRQ_ERR_MASK (HCU_DMA_IRQ_SAI_ERR | \
HCU_DMA_IRQ_BAD_COMP_ERR | \
HCU_DMA_IRQ_INBUF_RD_ERR | \
HCU_DMA_IRQ_INBUF_WD_ERR | \
HCU_DMA_IRQ_OUTBUF_WR_ERR | \
HCU_DMA_IRQ_OUTBUF_RD_ERR | \
HCU_DMA_IRQ_CRD_ERR)
#define HCU_DMA_SNOOP_MASK (0x7 << 28)
#define HCU_DMA_SRC_LL_EN BIT(25)
#define HCU_DMA_EN BIT(31)
#define OCS_HCU_ENDIANNESS_VALUE 0x2A
#define HCU_DMA_MSI_UNMASK BIT(0)
#define HCU_DMA_MSI_DISABLE 0
#define HCU_IRQ_DISABLE 0
#define OCS_HCU_START BIT(0)
#define OCS_HCU_TERMINATE BIT(1)
#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
#define OCS_HCU_HW_KEY_LEN_U32 (OCS_HCU_HW_KEY_LEN / sizeof(u32))
#define HCU_DATA_WRITE_ENDIANNESS_OFFSET 26
#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3 (SHA256_DIGEST_SIZE / sizeof(u32))
#define OCS_HCU_NUM_CHAINS_SHA384_512 (SHA512_DIGEST_SIZE / sizeof(u32))
/*
* While polling on a busy HCU, wait maximum 200us between one check and the
* other.
*/
#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US 200
/* Wait on a busy HCU for maximum 1 second. */
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
/**
* struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
* @src_addr: Source address of the data.
* @src_len: Length of data to be fetched.
* @nxt_desc: Next descriptor to fetch.
 * @ll_flags: Flags (Freeze / terminate) for the DMA engine.
*/
struct ocs_hcu_dma_entry {
u32 src_addr;
u32 src_len;
u32 nxt_desc;
u32 ll_flags;
};
/**
* struct ocs_hcu_dma_list - OCS-specific DMA linked list.
* @head: The head of the list (points to the array backing the list).
* @tail: The current tail of the list; NULL if the list is empty.
* @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
* array).
* @max_nents: Maximum number of entries in the list (i.e., number of elements
* in the backing array).
*
* The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
* backing the list is allocated with dma_alloc_coherent() and pointed by
* @head.
*/
struct ocs_hcu_dma_list {
struct ocs_hcu_dma_entry *head;
struct ocs_hcu_dma_entry *tail;
dma_addr_t dma_addr;
size_t max_nents;
};
static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
{
switch (algo) {
case OCS_HCU_ALGO_SHA224:
case OCS_HCU_ALGO_SHA256:
case OCS_HCU_ALGO_SM3:
return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
case OCS_HCU_ALGO_SHA384:
case OCS_HCU_ALGO_SHA512:
return OCS_HCU_NUM_CHAINS_SHA384_512;
default:
return 0;
	}
}
static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
{
switch (algo) {
case OCS_HCU_ALGO_SHA224:
return SHA224_DIGEST_SIZE;
case OCS_HCU_ALGO_SHA256:
case OCS_HCU_ALGO_SM3:
		/* SM3 shares the same digest size. */
return SHA256_DIGEST_SIZE;
case OCS_HCU_ALGO_SHA384:
return SHA384_DIGEST_SIZE;
case OCS_HCU_ALGO_SHA512:
return SHA512_DIGEST_SIZE;
default:
return 0;
}
}
/**
 * ocs_hcu_wait_busy() - Wait for HCU OCS hardware to become usable.
* @hcu_dev: OCS HCU device to wait for.
*
 * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
 * internal timeout has expired.
*/
static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{
long val;
return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
!(val & HCU_STATUS_BUSY),
OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
OCS_HCU_WAIT_BUSY_TIMEOUT_US);
}
static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
{
/* Clear any pending interrupts. */
writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
hcu_dev->irq_err = false;
/* Enable error and HCU done interrupts. */
writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
hcu_dev->io_base + OCS_HCU_IER);
}
static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
{
/* Clear any pending interrupts. */
writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
hcu_dev->irq_err = false;
/* Only operating on DMA source completion and error interrupts. */
writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
/* Unmask */
writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
}
static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
{
writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
}
static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
{
int rc;
rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
if (rc)
goto exit;
if (hcu_dev->irq_err) {
/* Unset flag and return error. */
hcu_dev->irq_err = false;
rc = -EIO;
goto exit;
}
exit:
ocs_hcu_irq_dis(hcu_dev);
return rc;
}
/**
* ocs_hcu_get_intermediate_data() - Get intermediate data.
* @hcu_dev: The target HCU device.
 * @data: Where to store the intermediate data.
* @algo: The algorithm being used.
*
* This function is used to save the current hashing process state in order to
* continue it in the future.
*
* Note: once all data has been processed, the intermediate data actually
* contains the hashing result. So this function is also used to retrieve the
* final result of a hashing process.
*
* Return: 0 on success, negative error code otherwise.
*/
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
struct ocs_hcu_idata *data,
enum ocs_hcu_algo algo)
{
const int n = ocs_hcu_num_chains(algo);
u32 *chain;
int rc;
int i;
/* Data not requested. */
if (!data)
return -EINVAL;
chain = (u32 *)data->digest;
/* Ensure that the OCS is no longer busy before reading the chains. */
rc = ocs_hcu_wait_busy(hcu_dev);
if (rc)
return rc;
/*
	 * This loop is safe because data->digest is an array of
* SHA512_DIGEST_SIZE bytes and the maximum value returned by
* ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
* to SHA512_DIGEST_SIZE / sizeof(u32).
*/
for (i = 0; i < n; i++)
chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
return 0;
}
/**
* ocs_hcu_set_intermediate_data() - Set intermediate data.
* @hcu_dev: The target HCU device.
* @data: The intermediate data to be set.
* @algo: The algorithm being used.
*
* This function is used to continue a previous hashing process.
*/
static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
const struct ocs_hcu_idata *data,
enum ocs_hcu_algo algo)
{
const int n = ocs_hcu_num_chains(algo);
u32 *chain = (u32 *)data->digest;
int i;
/*
	 * This loop is safe because data->digest is an array of
* SHA512_DIGEST_SIZE bytes and the maximum value returned by
* ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
* to SHA512_DIGEST_SIZE / sizeof(u32).
*/
for (i = 0; i < n; i++)
writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
}
static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
{
u32 *chain;
int rc;
int i;
if (!dgst)
return -EINVAL;
/* Length of the output buffer must match the algo digest size. */
if (dgst_len != ocs_hcu_digest_size(algo))
return -EINVAL;
/* Ensure that the OCS is no longer busy before reading the chains. */
rc = ocs_hcu_wait_busy(hcu_dev);
if (rc)
return rc;
chain = (u32 *)dgst;
for (i = 0; i < dgst_len / sizeof(u32); i++)
chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
return 0;
}
/**
* ocs_hcu_hw_cfg() - Configure the HCU hardware.
* @hcu_dev: The HCU device to configure.
* @algo: The algorithm to be used by the HCU device.
* @use_hmac: Whether or not HW HMAC should be used.
*
* Return: 0 on success, negative error code otherwise.
*/
static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
bool use_hmac)
{
u32 cfg;
int rc;
if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
algo != OCS_HCU_ALGO_SM3)
return -EINVAL;
rc = ocs_hcu_wait_busy(hcu_dev);
if (rc)
return rc;
/* Ensure interrupts are disabled. */
ocs_hcu_irq_dis(hcu_dev);
/* Configure endianness, hashing algorithm and HW HMAC (if needed) */
cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
cfg |= algo << HCU_MODE_ALGO_SHIFT;
if (use_hmac)
cfg |= BIT(HCU_MODE_HMAC_SHIFT);
writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
return 0;
}
/**
* ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
* @hcu_dev: The OCS HCU device whose key registers should be cleared.
*/
static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
{
int reg_off;
/* Clear OCS_HCU_KEY_[0..15] */
for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
}
/**
* ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
* @hcu_dev: The OCS HCU device the key should be written to.
* @key: The key to be written.
* @len: The size of the key to write. It must be OCS_HCU_HW_KEY_LEN.
*
* Return: 0 on success, negative error code otherwise.
*/
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
{
u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
int i;
if (len > OCS_HCU_HW_KEY_LEN)
return -EINVAL;
/* Copy key into temporary u32 array. */
memcpy(key_u32, key, len);
/*
* Hardware requires all the bytes of the HW Key vector to be
	 * written. So pad with zeros until we reach OCS_HCU_HW_KEY_LEN.
*/
memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);
/*
* OCS hardware expects the MSB of the key to be written at the highest
	 * address of the HCU Key vector; in other words, the key must be
* written in reverse order.
*
* Therefore, we first enable byte swapping for the HCU key vector;
* so that bytes of 32-bit word written to OCS_HCU_KEY_[0..15] will be
* swapped:
* 3 <---> 0, 2 <---> 1.
*/
writel(HCU_BYTE_ORDER_SWAP,
hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
/*
* And then we write the 32-bit words composing the key starting from
* the end of the key.
*/
for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));
memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);
return 0;
}
/**
* ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
* @hcu_dev: The OCS HCU device to use.
* @dma_list: The OCS DMA list mapping the data to hash.
* @finalize: Whether or not this is the last hashing operation and therefore
 *		the final hash should be computed even if the data is not
* block-aligned.
*
* Return: 0 on success, negative error code otherwise.
*/
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
const struct ocs_hcu_dma_list *dma_list,
bool finalize)
{
u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
int rc;
if (!dma_list)
return -EINVAL;
/*
* For final requests we use HCU_DONE IRQ to be notified when all input
* data has been processed by the HCU; however, we cannot do so for
* non-final requests, because we don't get a HCU_DONE IRQ when we
* don't terminate the operation.
*
* Therefore, for non-final requests, we use the DMA IRQ, which
	 * triggers when the DMA has finished feeding all the input data to the
* HCU, but the HCU may still be processing it. This is fine, since we
* will wait for the HCU processing to be completed when we try to read
* intermediate results, in ocs_hcu_get_intermediate_data().
*/
if (finalize)
ocs_hcu_done_irq_en(hcu_dev);
else
ocs_hcu_dma_irq_en(hcu_dev);
reinit_completion(&hcu_dev->irq_done);
writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);
writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
if (finalize)
writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
if (rc)
return rc;
return 0;
}
struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
int max_nents)
{
struct ocs_hcu_dma_list *dma_list;
dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
if (!dma_list)
return NULL;
/* Total size of the DMA list to allocate. */
dma_list->head = dma_alloc_coherent(hcu_dev->dev,
sizeof(*dma_list->head) * max_nents,
&dma_list->dma_addr, GFP_KERNEL);
if (!dma_list->head) {
kfree(dma_list);
return NULL;
}
dma_list->max_nents = max_nents;
dma_list->tail = NULL;
return dma_list;
}
void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
struct ocs_hcu_dma_list *dma_list)
{
if (!dma_list)
return;
dma_free_coherent(hcu_dev->dev,
sizeof(*dma_list->head) * dma_list->max_nents,
dma_list->head, dma_list->dma_addr);
kfree(dma_list);
}
/* Add a new DMA entry at the end of the OCS DMA list. */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
struct ocs_hcu_dma_list *dma_list,
dma_addr_t addr, u32 len)
{
struct device *dev = hcu_dev->dev;
struct ocs_hcu_dma_entry *old_tail;
struct ocs_hcu_dma_entry *new_tail;
if (!len)
return 0;
if (!dma_list)
return -EINVAL;
if (addr & ~OCS_HCU_DMA_BIT_MASK) {
dev_err(dev,
"Unexpected error: Invalid DMA address for OCS HCU\n");
return -EINVAL;
}
old_tail = dma_list->tail;
new_tail = old_tail ? old_tail + 1 : dma_list->head;
/* Check if list is full. */
if (new_tail - dma_list->head >= dma_list->max_nents)
return -ENOMEM;
/*
* If there was an old tail (i.e., this is not the first element we are
* adding), un-terminate the old tail and make it point to the new one.
*/
if (old_tail) {
old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
/*
* The old tail 'nxt_desc' must point to the DMA address of the
* new tail.
*/
old_tail->nxt_desc = dma_list->dma_addr +
sizeof(*dma_list->tail) * (new_tail -
dma_list->head);
}
new_tail->src_addr = (u32)addr;
new_tail->src_len = (u32)len;
new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
new_tail->nxt_desc = 0;
/* Update list tail with new tail. */
dma_list->tail = new_tail;
return 0;
}
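/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): how the DMA list helpers above are meant to be combined to
 * hash one DMA-mapped buffer. The wrapper function and its arguments are
 * hypothetical; only the ocs_hcu_* calls are real, and @ctx is assumed to
 * have been initialized with ocs_hcu_hash_init().
 */
#if 0
static int example_hcu_hash_one_buffer(struct ocs_hcu_dev *hcu_dev,
					struct ocs_hcu_hash_ctx *ctx,
					dma_addr_t buf_dma, u32 buf_len)
{
	struct ocs_hcu_dma_list *dma_list;
	int rc;
	/* A single entry is enough for one contiguous buffer. */
	dma_list = ocs_hcu_dma_list_alloc(hcu_dev, 1);
	if (!dma_list)
		return -ENOMEM;
	rc = ocs_hcu_dma_list_add_tail(hcu_dev, dma_list, buf_dma, buf_len);
	if (rc)
		goto out_free;
	/* Feed the mapped data to the HCU and update the hash context. */
	rc = ocs_hcu_hash_update(hcu_dev, ctx, dma_list);
out_free:
	ocs_hcu_dma_list_free(hcu_dev, dma_list);
	return rc;
}
#endif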
/**
* ocs_hcu_hash_init() - Initialize hash operation context.
* @ctx: The context to initialize.
* @algo: The hashing algorithm to use.
*
* Return: 0 on success, negative error code otherwise.
*/
int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
{
if (!ctx)
return -EINVAL;
ctx->algo = algo;
ctx->idata.msg_len_lo = 0;
ctx->idata.msg_len_hi = 0;
/* No need to set idata.digest to 0. */
return 0;
}
/**
* ocs_hcu_hash_update() - Perform a hashing iteration.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
*
* Return: 0 on success; negative error code otherwise.
*/
int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
struct ocs_hcu_hash_ctx *ctx,
const struct ocs_hcu_dma_list *dma_list)
{
int rc;
if (!hcu_dev || !ctx)
return -EINVAL;
/* Configure the hardware for the current request. */
rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
if (rc)
return rc;
/* If we already processed some data, idata needs to be set. */
if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
/* Start linked-list DMA hashing. */
rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
if (rc)
return rc;
/* Update idata and return. */
return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
}
/**
* ocs_hcu_hash_finup() - Update and finalize hash computation.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
* @dgst: The buffer where to save the computed digest.
* @dgst_len: The length of @dgst.
*
* Return: 0 on success; negative error code otherwise.
*/
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
const struct ocs_hcu_hash_ctx *ctx,
const struct ocs_hcu_dma_list *dma_list,
u8 *dgst, size_t dgst_len)
{
int rc;
if (!hcu_dev || !ctx)
return -EINVAL;
/* Configure the hardware for the current request. */
rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
if (rc)
return rc;
/* If we already processed some data, idata needs to be set. */
if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
/* Start linked-list DMA hashing. */
rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
if (rc)
return rc;
/* Get digest and return. */
return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
/**
* ocs_hcu_hash_final() - Finalize hash computation.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dgst: The buffer where to save the computed digest.
* @dgst_len: The length of @dgst.
*
* Return: 0 on success; negative error code otherwise.
*/
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
size_t dgst_len)
{
int rc;
if (!hcu_dev || !ctx)
return -EINVAL;
/* Configure the hardware for the current request. */
rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
if (rc)
return rc;
/* If we already processed some data, idata needs to be set. */
if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
/*
* Enable HCU interrupts, so that HCU_DONE will be triggered once the
* final hash is computed.
*/
ocs_hcu_done_irq_en(hcu_dev);
reinit_completion(&hcu_dev->irq_done);
writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
if (rc)
return rc;
/* Get digest and return. */
return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
/**
* ocs_hcu_digest() - Compute hash digest.
* @hcu_dev: The OCS HCU device to use.
* @algo: The hash algorithm to use.
* @data: The input data to process.
* @data_len: The length of @data.
* @dgst: The buffer where to save the computed digest.
* @dgst_len: The length of @dgst.
*
* Return: 0 on success; negative error code otherwise.
*/
int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
void *data, size_t data_len, u8 *dgst, size_t dgst_len)
{
struct device *dev = hcu_dev->dev;
dma_addr_t dma_handle;
u32 reg;
int rc;
/* Configure the hardware for the current request. */
rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
if (rc)
return rc;
dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_handle))
return -EIO;
reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
ocs_hcu_done_irq_en(hcu_dev);
reinit_completion(&hcu_dev->irq_done);
writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
if (rc)
return rc;
dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
/**
* ocs_hcu_hmac() - Compute HMAC.
* @hcu_dev: The OCS HCU device to use.
* @algo: The hash algorithm to use with HMAC.
* @key: The key to use.
 * @key_len: The length of @key.
 * @dma_list: The OCS DMA list mapping the input data to process.
* @dgst: The buffer where to save the computed HMAC.
* @dgst_len: The length of @dgst.
*
* Return: 0 on success; negative error code otherwise.
*/
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
const u8 *key, size_t key_len,
const struct ocs_hcu_dma_list *dma_list,
u8 *dgst, size_t dgst_len)
{
int rc;
	/* Ensure the key is present and its length is non-zero. */
if (!key || key_len == 0)
return -EINVAL;
/* Configure the hardware for the current request. */
rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
if (rc)
return rc;
rc = ocs_hcu_write_key(hcu_dev, key, key_len);
if (rc)
return rc;
rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
/* Clear HW key before processing return code. */
ocs_hcu_clear_key(hcu_dev);
if (rc)
return rc;
return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
{
struct ocs_hcu_dev *hcu_dev = dev_id;
u32 hcu_irq;
u32 dma_irq;
/* Read and clear the HCU interrupt. */
hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);
/* Read and clear the HCU DMA interrupt. */
dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
/* Check for errors. */
if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
hcu_dev->irq_err = true;
goto complete;
}
/* Check for DONE IRQs. */
if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
goto complete;
return IRQ_NONE;
complete:
complete(&hcu_dev->irq_done);
return IRQ_HANDLED;
}
MODULE_LICENSE("GPL");
| linux-master | drivers/crypto/intel/keembay/ocs-hcu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Keem Bay OCS AES Crypto Driver.
*
* Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "ocs-aes.h"
#define AES_COMMAND_OFFSET 0x0000
#define AES_KEY_0_OFFSET 0x0004
#define AES_KEY_1_OFFSET 0x0008
#define AES_KEY_2_OFFSET 0x000C
#define AES_KEY_3_OFFSET 0x0010
#define AES_KEY_4_OFFSET 0x0014
#define AES_KEY_5_OFFSET 0x0018
#define AES_KEY_6_OFFSET 0x001C
#define AES_KEY_7_OFFSET 0x0020
#define AES_IV_0_OFFSET 0x0024
#define AES_IV_1_OFFSET 0x0028
#define AES_IV_2_OFFSET 0x002C
#define AES_IV_3_OFFSET 0x0030
#define AES_ACTIVE_OFFSET 0x0034
#define AES_STATUS_OFFSET 0x0038
#define AES_KEY_SIZE_OFFSET 0x0044
#define AES_IER_OFFSET 0x0048
#define AES_ISR_OFFSET 0x005C
#define AES_MULTIPURPOSE1_0_OFFSET 0x0200
#define AES_MULTIPURPOSE1_1_OFFSET 0x0204
#define AES_MULTIPURPOSE1_2_OFFSET 0x0208
#define AES_MULTIPURPOSE1_3_OFFSET 0x020C
#define AES_MULTIPURPOSE2_0_OFFSET 0x0220
#define AES_MULTIPURPOSE2_1_OFFSET 0x0224
#define AES_MULTIPURPOSE2_2_OFFSET 0x0228
#define AES_MULTIPURPOSE2_3_OFFSET 0x022C
#define AES_BYTE_ORDER_CFG_OFFSET 0x02C0
#define AES_TLEN_OFFSET 0x0300
#define AES_T_MAC_0_OFFSET 0x0304
#define AES_T_MAC_1_OFFSET 0x0308
#define AES_T_MAC_2_OFFSET 0x030C
#define AES_T_MAC_3_OFFSET 0x0310
#define AES_PLEN_OFFSET 0x0314
#define AES_A_DMA_SRC_ADDR_OFFSET 0x0400
#define AES_A_DMA_DST_ADDR_OFFSET 0x0404
#define AES_A_DMA_SRC_SIZE_OFFSET 0x0408
#define AES_A_DMA_DST_SIZE_OFFSET 0x040C
#define AES_A_DMA_DMA_MODE_OFFSET 0x0410
#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET 0x0418
#define AES_A_DMA_NEXT_DST_DESCR_OFFSET 0x041C
#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET 0x0420
#define AES_A_DMA_LOG_OFFSET 0x0424
#define AES_A_DMA_STATUS_OFFSET 0x0428
#define AES_A_DMA_PERF_CNTR_OFFSET 0x042C
#define AES_A_DMA_MSI_ISR_OFFSET 0x0480
#define AES_A_DMA_MSI_IER_OFFSET 0x0484
#define AES_A_DMA_MSI_MASK_OFFSET 0x0488
#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET 0x0600
#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET 0x0700
/*
* AES_A_DMA_DMA_MODE register.
* Default: 0x00000000.
* bit[31] ACTIVE
* This bit activates the DMA. When the DMA finishes, it resets
* this bit to zero.
* bit[30:26] Unused by this driver.
* bit[25] SRC_LINK_LIST_EN
* Source link list enable bit. When the linked list is terminated
* this bit is reset by the DMA.
* bit[24] DST_LINK_LIST_EN
* Destination link list enable bit. When the linked list is
* terminated this bit is reset by the DMA.
* bit[23:0] Unused by this driver.
*/
#define AES_A_DMA_DMA_MODE_ACTIVE BIT(31)
#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN BIT(25)
#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN BIT(24)
/*
* AES_ACTIVE register
* default 0x00000000
* bit[31:10] Reserved
* bit[9] LAST_ADATA
* bit[8] LAST_GCX
* bit[7:2] Reserved
* bit[1] TERMINATION
* bit[0] TRIGGER
*/
#define AES_ACTIVE_LAST_ADATA BIT(9)
#define AES_ACTIVE_LAST_CCM_GCM BIT(8)
#define AES_ACTIVE_TERMINATION BIT(1)
#define AES_ACTIVE_TRIGGER BIT(0)
#define AES_DISABLE_INT 0x00000000
#define AES_DMA_CPD_ERR_INT BIT(8)
#define AES_DMA_OUTBUF_RD_ERR_INT BIT(7)
#define AES_DMA_OUTBUF_WR_ERR_INT BIT(6)
#define AES_DMA_INBUF_RD_ERR_INT BIT(5)
#define AES_DMA_INBUF_WR_ERR_INT BIT(4)
#define AES_DMA_BAD_COMP_INT BIT(3)
#define AES_DMA_SAI_INT BIT(2)
#define AES_DMA_SRC_DONE_INT BIT(0)
#define AES_COMPLETE_INT BIT(1)
#define AES_DMA_MSI_MASK_CLEAR BIT(0)
#define AES_128_BIT_KEY 0x00000000
#define AES_256_BIT_KEY BIT(0)
#define AES_DEACTIVATE_PERF_CNTR 0x00000000
#define AES_ACTIVATE_PERF_CNTR BIT(0)
#define AES_MAX_TAG_SIZE_U32 4
#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
/*
* There is an inconsistency in the documentation. This is documented as a
* 11-bit value, but it is actually 10-bits.
*/
#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK 0x3FF
/*
* During CCM decrypt, the OCS block needs to finish processing the ciphertext
* before the tag is written. For 128-bit mode this required delay is 28 OCS
* clock cycles. For 256-bit mode it is 36 OCS clock cycles.
*/
#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT 36UL
/*
* During CCM decrypt there must be a delay of at least 42 OCS clock cycles
* between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
* bit in the same register (as stated in the OCS databook)
*/
#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT 42UL
/* See RFC3610 section 2.2 */
#define L_PRIME_MIN (1)
#define L_PRIME_MAX (7)
/*
* CCM IV format from RFC 3610 section 2.3
*
* Octet Number Contents
* ------------ ---------
* 0 Flags
* 1 ... 15-L Nonce N
* 16-L ... 15 Counter i
*
* Flags = L' = L - 1
*/
#define L_PRIME_IDX 0
#define COUNTER_START(lprime) (16 - ((lprime) + 1))
#define COUNTER_LEN(lprime) ((lprime) + 1)
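/*
 * Worked example (editorial note, not from RFC 3610): with a 4-octet length
 * field, L = 4 and L' = L - 1 = 3, so the flags occupy octet 0, the nonce
 * occupies octets 1..11 and the counter occupies octets 12..15, i.e.
 * COUNTER_START(3) == 12 and COUNTER_LEN(3) == 4.
 */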
enum aes_counter_mode {
AES_CTR_M_NO_INC = 0,
AES_CTR_M_32_INC = 1,
AES_CTR_M_64_INC = 2,
AES_CTR_M_128_INC = 3,
};
/**
* struct ocs_dma_linked_list - OCS DMA linked list entry.
* @src_addr: Source address of the data.
* @src_len: Length of data to be fetched.
* @next: Next dma_list to fetch.
 * @ll_flags: Flags (Freeze / terminate) for the DMA engine.
*/
struct ocs_dma_linked_list {
u32 src_addr;
u32 src_len;
u32 next;
u32 ll_flags;
} __packed;
/*
* Set endianness of inputs and outputs
* AES_BYTE_ORDER_CFG
* default 0x00000000
* bit [10] - KEY_HI_LO_SWAP
* bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
* bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
* bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
* bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
* bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
* bit [4] - IV_SWAP_BYTES_IN_DWORD
* bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
* bit [2] - DOUT_SWAP_BYTES_IN_DWORD
* bit [1] - DOUT_SWAP_DWORDS_IN_OCTWORD
* bit [0] - DOUT_SWAP_BYTES_IN_DWORD
*/
static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
{
iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
}
/* Trigger AES process start. */
static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/* Indicate the last batch of data. */
static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_ACTIVE_TERMINATION,
aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/*
* Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
*
* Called when DMA is programmed to fetch the last batch of data.
* - For AES-CCM it is called for the last batch of Payload data and Ciphertext
* data.
* - For AES-GCM, it is called for the last batch of Plaintext data and
* Ciphertext data.
*/
static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_ACTIVE_LAST_CCM_GCM,
aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/* Wait for LAST_CCM_GCM bit to be unset. */
static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
{
u32 aes_active_reg;
do {
aes_active_reg = ioread32(aes_dev->base_reg +
AES_ACTIVE_OFFSET);
} while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
}
/* Wait until the 10-bit input buffer occupancy field reads zero. */
static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
{
u32 reg;
do {
reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
} while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
}
/*
* Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
* other bits).
*
* Called when DMA is programmed to fetch the last batch of Associated Data
* (CCM case) or Additional Authenticated Data (GCM case).
*/
static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
aes_dev->base_reg + AES_ACTIVE_OFFSET);
}
/* Set DMA src and dst transfer size to 0 */
static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
{
iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
}
/* Activate DMA for zero-byte transfer case. */
static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Activate DMA and enable src linked list */
static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Activate DMA and enable dst linked list */
static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Activate DMA and enable src and dst linked lists */
static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}
/* Reset PERF_CNTR to 0 and activate it */
static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
{
iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
iowrite32(AES_ACTIVATE_PERF_CNTR,
aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}
/* Wait until PERF_CNTR is > delay, then deactivate it */
static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
int delay)
{
while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
;
iowrite32(AES_DEACTIVATE_PERF_CNTR,
aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}
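/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): one way the performance-counter helpers above can be paired
 * with the CCM delay constants defined earlier to guarantee a minimum number
 * of OCS clock cycles between two programming steps. The wrapper function
 * and the elided step are hypothetical.
 */
#if 0
static void example_ccm_decrypt_tag_delay(const struct ocs_aes_dev *aes_dev)
{
	/* Start counting OCS clock cycles from zero. */
	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
	/* ... program the preceding step here ... */
	/* Busy-wait until at least the required cycle count has elapsed. */
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_TAG_CLK_COUNT);
}
#endif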
/* Disable AES and DMA IRQ. */
static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
{
u32 isr_val = 0;
/* Disable interrupts */
iowrite32(AES_DISABLE_INT,
aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
/* Clear any pending interrupt */
isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
if (isr_val)
iowrite32(isr_val,
aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
if (isr_val)
iowrite32(isr_val,
aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
if (isr_val)
iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
}
/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
{
if (irq == AES_COMPLETE_INT) {
/* Ensure DMA error interrupts are enabled */
iowrite32(AES_DMA_CPD_ERR_INT |
AES_DMA_OUTBUF_RD_ERR_INT |
AES_DMA_OUTBUF_WR_ERR_INT |
AES_DMA_INBUF_RD_ERR_INT |
AES_DMA_INBUF_WR_ERR_INT |
AES_DMA_BAD_COMP_INT |
AES_DMA_SAI_INT,
aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
/*
* AES_IER
* default 0x00000000
* bits [31:3] - reserved
* bit [2] - EN_SKS_ERR
* bit [1] - EN_AES_COMPLETE
* bit [0] - reserved
*/
iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
return;
}
if (irq == AES_DMA_SRC_DONE_INT) {
/* Ensure AES interrupts are disabled */
iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
/*
* DMA_MSI_IER
* default 0x00000000
* bits [31:9] - reserved
* bit [8] - CPD_ERR_INT_EN
* bit [7] - OUTBUF_RD_ERR_INT_EN
* bit [6] - OUTBUF_WR_ERR_INT_EN
* bit [5] - INBUF_RD_ERR_INT_EN
* bit [4] - INBUF_WR_ERR_INT_EN
* bit [3] - BAD_COMP_INT_EN
* bit [2] - SAI_INT_EN
* bit [1] - DST_DONE_INT_EN
* bit [0] - SRC_DONE_INT_EN
*/
iowrite32(AES_DMA_CPD_ERR_INT |
AES_DMA_OUTBUF_RD_ERR_INT |
AES_DMA_OUTBUF_WR_ERR_INT |
AES_DMA_INBUF_RD_ERR_INT |
AES_DMA_INBUF_WR_ERR_INT |
AES_DMA_BAD_COMP_INT |
AES_DMA_SAI_INT |
AES_DMA_SRC_DONE_INT,
aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
}
}
/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
{
int rc;
reinit_completion(&aes_dev->irq_completion);
aes_irq_enable(aes_dev, irq);
rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
if (rc)
return rc;
return aes_dev->dma_err_mask ? -EIO : 0;
}
/* Configure DMA to OCS, linked list mode */
static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
dma_addr_t dma_list)
{
iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
iowrite32(dma_list,
aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
}
/* Configure DMA from OCS, linked list mode */
static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
dma_addr_t dma_list)
{
iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
iowrite32(dma_list,
aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
}
irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
{
struct ocs_aes_dev *aes_dev = dev_id;
u32 aes_dma_isr;
/* Read DMA ISR status. */
aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
/* Disable and clear interrupts. */
aes_irq_disable(aes_dev);
/* Save DMA error status. */
aes_dev->dma_err_mask = aes_dma_isr &
(AES_DMA_CPD_ERR_INT |
AES_DMA_OUTBUF_RD_ERR_INT |
AES_DMA_OUTBUF_WR_ERR_INT |
AES_DMA_INBUF_RD_ERR_INT |
AES_DMA_INBUF_WR_ERR_INT |
AES_DMA_BAD_COMP_INT |
AES_DMA_SAI_INT);
/* Signal IRQ completion. */
complete(&aes_dev->irq_completion);
return IRQ_HANDLED;
}
/**
* ocs_aes_set_key() - Write key into OCS AES hardware.
* @aes_dev: The OCS AES device to write the key to.
* @key_size: The size of the key (in bytes).
* @key: The key to write.
* @cipher: The cipher the key is for.
*
* For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
*
* Return: 0 on success, negative error code otherwise.
*/
int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
enum ocs_cipher cipher)
{
const u32 *key_u32;
u32 val;
int i;
/* OCS AES supports 128-bit and 256-bit keys only. */
if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
dev_err(aes_dev->dev,
"%d-bit keys not supported by AES cipher\n",
key_size * 8);
return -EINVAL;
}
/* OCS SM4 supports 128-bit keys only. */
if (cipher == OCS_SM4 && key_size != 16) {
dev_err(aes_dev->dev,
"%d-bit keys not supported for SM4 cipher\n",
key_size * 8);
return -EINVAL;
}
if (!key)
return -EINVAL;
key_u32 = (const u32 *)key;
/* Write key to AES_KEY[0-7] registers */
for (i = 0; i < (key_size / sizeof(u32)); i++) {
iowrite32(key_u32[i],
aes_dev->base_reg + AES_KEY_0_OFFSET +
(i * sizeof(u32)));
}
/*
* Write key size
* bits [31:1] - reserved
* bit [0] - AES_KEY_SIZE
* 0 - 128 bit key
* 1 - 256 bit key
*/
val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);
return 0;
}
/* Write AES_COMMAND */
static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
enum ocs_cipher cipher,
enum ocs_mode mode,
enum ocs_instruction instruction)
{
u32 val;
/* AES_COMMAND
* default 0x000000CC
* bit [14] - CIPHER_SELECT
* 0 - AES
* 1 - SM4
* bits [11:8] - OCS_AES_MODE
* 0000 - ECB
* 0001 - CBC
* 0010 - CTR
* 0110 - CCM
* 0111 - GCM
* 1001 - CTS
* bits [7:6] - AES_INSTRUCTION
* 00 - ENCRYPT
* 01 - DECRYPT
* 10 - EXPAND
* 11 - BYPASS
* bits [3:2] - CTR_M_BITS
* 00 - No increment
* 01 - Least significant 32 bits are incremented
* 10 - Least significant 64 bits are incremented
* 11 - Full 128 bits are incremented
*/
val = (cipher << 14) | (mode << 8) | (instruction << 6) |
(AES_CTR_M_128_INC << 2);
iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
}
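/*
 * Worked example (editorial note): assuming the ocs_cipher, ocs_mode and
 * ocs_instruction enum values mirror the register encodings listed above,
 * an AES-GCM encrypt request would program
 * (0 << 14) | (0x7 << 8) | (0x0 << 6) | (0x3 << 2) = 0x70C
 * into AES_COMMAND.
 */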
static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
enum ocs_mode mode,
enum ocs_cipher cipher,
enum ocs_instruction instruction)
{
/* Ensure interrupts are disabled and pending interrupts cleared. */
aes_irq_disable(aes_dev);
/* Set endianness recommended by data-sheet. */
aes_a_set_endianness(aes_dev);
/* Set AES_COMMAND register. */
set_ocs_aes_command(aes_dev, cipher, mode, instruction);
}
/*
* Write the byte length of the last AES/SM4 block of Payload data (without
* zero padding and without the length of the MAC) in register AES_PLEN.
*/
static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
u32 size)
{
u32 val;
if (size == 0) {
val = 0;
goto exit;
}
val = size % AES_BLOCK_SIZE;
if (val == 0)
val = AES_BLOCK_SIZE;
exit:
iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
}
/*
* Validate inputs according to mode.
* If OK return 0; else return -EINVAL.
*/
static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
const u8 *iv, u32 iv_size,
dma_addr_t aad_dma_list, u32 aad_size,
const u8 *tag, u32 tag_size,
enum ocs_cipher cipher, enum ocs_mode mode,
enum ocs_instruction instruction,
dma_addr_t dst_dma_list)
{
/* Ensure cipher, mode and instruction are valid. */
if (!(cipher == OCS_AES || cipher == OCS_SM4))
return -EINVAL;
if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
return -EINVAL;
if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
instruction != OCS_EXPAND && instruction != OCS_BYPASS)
return -EINVAL;
/*
* When instruction is OCS_BYPASS, OCS simply copies data from source
* to destination using DMA.
*
* AES mode is irrelevant, but both source and destination DMA
	 * linked lists must be defined.
*/
if (instruction == OCS_BYPASS) {
if (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
return 0;
}
	/*
	 * For performance reasons, switch based on mode to limit unnecessary
	 * conditionals for each mode.
	 */
switch (mode) {
case OCS_MODE_ECB:
/* Ensure input length is multiple of block size */
if (src_size % AES_BLOCK_SIZE != 0)
return -EINVAL;
/* Ensure source and destination linked lists are created */
if (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
return 0;
case OCS_MODE_CBC:
/* Ensure input length is multiple of block size */
if (src_size % AES_BLOCK_SIZE != 0)
return -EINVAL;
/* Ensure source and destination linked lists are created */
if (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
/* Ensure IV is present and block size in length */
if (!iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
return 0;
case OCS_MODE_CTR:
/* Ensure input length of 1 byte or greater */
if (src_size == 0)
return -EINVAL;
/* Ensure source and destination linked lists are created */
if (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
/* Ensure IV is present and block size in length */
if (!iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
return 0;
case OCS_MODE_CTS:
/* Ensure input length >= block size */
if (src_size < AES_BLOCK_SIZE)
return -EINVAL;
/* Ensure source and destination linked lists are created */
if (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
/* Ensure IV is present and block size in length */
if (!iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
return 0;
case OCS_MODE_GCM:
/* Ensure IV is present and GCM_AES_IV_SIZE in length */
if (!iv || iv_size != GCM_AES_IV_SIZE)
return -EINVAL;
/*
* If input data present ensure source and destination linked
* lists are created
*/
if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR))
return -EINVAL;
/* If aad present ensure aad linked list is created */
if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
/* Ensure tag destination is set */
if (!tag)
return -EINVAL;
/* Just ensure that tag_size doesn't cause overflows. */
if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
return -EINVAL;
return 0;
case OCS_MODE_CCM:
/* Ensure IV is present and block size in length */
if (!iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
/* 2 <= L <= 8, so 1 <= L' <= 7 */
if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
iv[L_PRIME_IDX] > L_PRIME_MAX)
return -EINVAL;
/* If aad present ensure aad linked list is created */
if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
/* Just ensure that tag_size doesn't cause overflows. */
if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
return -EINVAL;
if (instruction == OCS_DECRYPT) {
/*
* If input data present ensure source and destination
* linked lists are created
*/
if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
dst_dma_list == DMA_MAPPING_ERROR))
return -EINVAL;
/* Ensure input tag is present */
if (!tag)
return -EINVAL;
return 0;
}
/* Instruction == OCS_ENCRYPT */
/*
* Destination linked list always required (for tag even if no
* input data)
*/
if (dst_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
/* If input data present ensure src linked list is created */
if (src_size && src_dma_list == DMA_MAPPING_ERROR)
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
/**
* ocs_aes_op() - Perform AES/SM4 operation.
* @aes_dev: The OCS AES device to use.
* @mode: The mode to use (ECB, CBC, CTR, or CTS).
* @cipher: The cipher to use (AES or SM4).
* @instruction: The instruction to perform (encrypt or decrypt).
* @dst_dma_list: The OCS DMA list mapping output memory.
* @src_dma_list: The OCS DMA list mapping input payload data.
* @src_size: The amount of data mapped by @src_dma_list.
* @iv: The IV vector.
* @iv_size: The size (in bytes) of @iv.
*
* Return: 0 on success, negative error code otherwise.
*/
int ocs_aes_op(struct ocs_aes_dev *aes_dev,
enum ocs_mode mode,
enum ocs_cipher cipher,
enum ocs_instruction instruction,
dma_addr_t dst_dma_list,
dma_addr_t src_dma_list,
u32 src_size,
u8 *iv,
u32 iv_size)
{
u32 *iv32;
int rc;
rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
NULL, 0, cipher, mode, instruction,
dst_dma_list);
if (rc)
return rc;
/*
* ocs_aes_validate_inputs() is a generic check, now ensure mode is not
* GCM or CCM.
*/
if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
return -EINVAL;
/* Cast IV to u32 array. */
iv32 = (u32 *)iv;
ocs_aes_init(aes_dev, mode, cipher, instruction);
if (mode == OCS_MODE_CTS) {
/* Write the byte length of the last data block to engine. */
ocs_aes_write_last_data_blk_len(aes_dev, src_size);
}
/* ECB is the only mode that doesn't use IV. */
if (mode != OCS_MODE_ECB) {
iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
}
/* Set AES_ACTIVE.TRIGGER to start the operation. */
aes_a_op_trigger(aes_dev);
/* Configure and activate input / output DMA. */
dma_to_ocs_aes_ll(aes_dev, src_dma_list);
dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
aes_a_dma_active_src_dst_ll_en(aes_dev);
if (mode == OCS_MODE_CTS) {
/*
* For CTS mode, instruct engine to activate ciphertext
* stealing if last block of data is incomplete.
*/
aes_a_set_last_gcx(aes_dev);
} else {
/* For all other modes, just write the 'termination' bit. */
aes_a_op_termination(aes_dev);
}
/* Wait for engine to complete processing. */
rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
if (rc)
return rc;
if (mode == OCS_MODE_CTR) {
/* Read back IV for streaming mode */
iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
}
return 0;
}
/* Compute and write J0 to engine registers. */
static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
const u8 *iv)
{
const u32 *j0 = (u32 *)iv;
	/*
	 * IV must be 12 bytes; other sizes are not supported since the Linux
	 * crypto API only expects/allows a 12-byte IV for GCM.
	 */
iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
}
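/*
 * For a 96-bit IV, NIST SP 800-38D defines J0 = IV || 0^31 || 1; the writes
 * above place the trailing 0x00000001 word in AES_IV_0 and the three
 * byte-swapped IV words, in reverse order, in AES_IV_1..AES_IV_3.
 */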
/* Read GCM tag from engine registers. */
static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
u8 *tag, u32 tag_size)
{
u32 tag_u32[AES_MAX_TAG_SIZE_U32];
/*
* The Authentication Tag T is stored in Little Endian order in the
* registers with the most significant bytes stored from AES_T_MAC[3]
* downward.
*/
tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));
memcpy(tag, tag_u32, tag_size);
}
/**
* ocs_aes_gcm_op() - Perform GCM operation.
* @aes_dev: The OCS AES device to use.
* @cipher: The Cipher to use (AES or SM4).
* @instruction: The instruction to perform (encrypt or decrypt).
* @dst_dma_list: The OCS DMA list mapping output memory.
* @src_dma_list: The OCS DMA list mapping input payload data.
* @src_size: The amount of data mapped by @src_dma_list.
* @iv: The input IV vector.
* @aad_dma_list: The OCS DMA list mapping input AAD data.
* @aad_size: The amount of data mapped by @aad_dma_list.
* @out_tag: Where to store computed tag.
* @tag_size: The size (in bytes) of @out_tag.
*
* Return: 0 on success, negative error code otherwise.
*/
int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
enum ocs_cipher cipher,
enum ocs_instruction instruction,
dma_addr_t dst_dma_list,
dma_addr_t src_dma_list,
u32 src_size,
const u8 *iv,
dma_addr_t aad_dma_list,
u32 aad_size,
u8 *out_tag,
u32 tag_size)
{
u64 bit_len;
u32 val;
int rc;
rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
GCM_AES_IV_SIZE, aad_dma_list,
aad_size, out_tag, tag_size, cipher,
OCS_MODE_GCM, instruction,
dst_dma_list);
if (rc)
return rc;
ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);
/* Compute and write J0 to OCS HW. */
ocs_aes_gcm_write_j0(aes_dev, iv);
/* Write out_tag byte length */
iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
/* Write the byte length of the last plaintext / ciphertext block. */
ocs_aes_write_last_data_blk_len(aes_dev, src_size);
/* Write ciphertext bit length */
bit_len = (u64)src_size * 8;
val = bit_len & 0xFFFFFFFF;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
val = bit_len >> 32;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);
/* Write aad bit length */
bit_len = (u64)aad_size * 8;
val = bit_len & 0xFFFFFFFF;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
val = bit_len >> 32;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);
/* Set AES_ACTIVE.TRIGGER to start the operation. */
aes_a_op_trigger(aes_dev);
/* Process AAD. */
if (aad_size) {
/* If aad present, configure DMA to feed it to the engine. */
dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
aes_a_dma_active_src_ll_en(aes_dev);
/* Instructs engine to pad last block of aad, if needed. */
aes_a_set_last_gcx_and_adata(aes_dev);
/* Wait for DMA transfer to complete. */
rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
if (rc)
return rc;
} else {
aes_a_set_last_gcx_and_adata(aes_dev);
}
/* Wait until adata (if present) has been processed. */
aes_a_wait_last_gcx(aes_dev);
aes_a_dma_wait_input_buffer_occupancy(aes_dev);
/* Now process payload. */
if (src_size) {
/* Configure and activate DMA for both input and output data. */
dma_to_ocs_aes_ll(aes_dev, src_dma_list);
dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
aes_a_dma_active_src_dst_ll_en(aes_dev);
} else {
aes_a_dma_set_xfer_size_zero(aes_dev);
aes_a_dma_active(aes_dev);
}
	/* Instruct the AES/SM4 engine that payload processing is over. */
aes_a_set_last_gcx(aes_dev);
/* Wait for OCS AES engine to complete processing. */
rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
if (rc)
return rc;
ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);
return 0;
}
/* Write encrypted tag to AES/SM4 engine. */
static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
const u8 *in_tag, u32 tag_size)
{
int i;
/* Ensure DMA input buffer is empty */
aes_a_dma_wait_input_buffer_occupancy(aes_dev);
	/*
	 * During CCM decrypt, the OCS block needs to finish processing the
	 * ciphertext before the tag is written, so a delay is needed after
	 * the DMA has completed writing the ciphertext.
	 */
aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
CCM_DECRYPT_DELAY_TAG_CLK_COUNT);
/* Write encrypted tag to AES/SM4 engine. */
for (i = 0; i < tag_size; i++) {
iowrite8(in_tag[i], aes_dev->base_reg +
AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}
}
/*
* Write B0 CCM block to OCS AES HW.
*
* Note: B0 format is documented in NIST Special Publication 800-38C
* https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
* (see Section A.2.1)
*/
static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
const u8 *iv, u32 adata_size, u32 tag_size,
u32 cryptlen)
{
u8 b0[16]; /* CCM B0 block is 16 bytes long. */
int i, q;
/* Initialize B0 to 0. */
memset(b0, 0, sizeof(b0));
/*
* B0[0] is the 'Flags Octet' and has the following structure:
* bit 7: Reserved
* bit 6: Adata flag
* bit 5-3: t value encoded as (t-2)/2
* bit 2-0: q value encoded as q - 1
*/
/* If there is AAD data, set the Adata flag. */
if (adata_size)
b0[0] |= BIT(6);
/*
* t denotes the octet length of T.
* t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
* encoded as (t - 2) / 2
*/
b0[0] |= (((tag_size - 2) / 2) & 0x7) << 3;
/*
* q is the octet length of Q.
* q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
* q - 1 == iv[0] & 0x7;
*/
b0[0] |= iv[0] & 0x7;
/*
* Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
* and must be copied to b0[1]..b0[15-q].
* q == (iv[0] & 0x7) + 1
*/
q = (iv[0] & 0x7) + 1;
for (i = 1; i <= 15 - q; i++)
b0[i] = iv[i];
/*
* The rest of B0 must contain Q, i.e., the message length.
* Q is encoded in q octets, in big-endian order, so to write it, we
* start from the end of B0 and we move backward.
*/
i = sizeof(b0) - 1;
while (q) {
b0[i] = cryptlen & 0xff;
cryptlen >>= 8;
i--;
q--;
}
/*
* If cryptlen is not zero at this point, it means that its original
* value was too big.
*/
if (cryptlen)
return -EOVERFLOW;
/* Now write B0 to OCS AES input buffer. */
for (i = 0; i < sizeof(b0); i++)
iowrite8(b0[i], aes_dev->base_reg +
AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
return 0;
}
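/*
 * Worked example: with adata present, a 16-byte tag and iv[0] == 3 (i.e.
 * q == 4), the flags octet is 0x40 | (7 << 3) | 0x3 = 0x7b, the nonce
 * occupies b0[1..11] and the 4-octet message length goes in b0[12..15].
 */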
/*
* Write adata length to OCS AES HW.
*
* Note: adata len encoding is documented in NIST Special Publication 800-38C
* https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
* (see Section A.2.2)
*/
static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
u64 adata_len)
{
u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
int i, len;
/*
* adata_len ('a') is encoded as follows:
* If 0 < a < 2^16 - 2^8 ==> 'a' encoded as [a]16, i.e., two octets
* (big endian).
* If 2^16 - 2^8 ≤ a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
* i.e., six octets (big endian).
* If 2^32 ≤ a < 2^64 ==> 'a' encoded as 0xff || 0xff || [a]64,
* i.e., ten octets (big endian).
*/
if (adata_len < 65280) {
len = 2;
*(__be16 *)enc_a = cpu_to_be16(adata_len);
} else if (adata_len <= 0xFFFFFFFF) {
len = 6;
*(__be16 *)enc_a = cpu_to_be16(0xfffe);
*(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
} else { /* adata_len >= 2^32 */
len = 10;
*(__be16 *)enc_a = cpu_to_be16(0xffff);
*(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
}
for (i = 0; i < len; i++)
iowrite8(enc_a[i],
aes_dev->base_reg +
AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}
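/*
 * Worked example: adata_len == 32 is encoded as the two octets 0x00 0x20,
 * while adata_len == 65536 is encoded as the six octets
 * 0xff 0xfe 0x00 0x01 0x00 0x00.
 */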
static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
dma_addr_t adata_dma_list, u32 adata_size)
{
int rc;
if (!adata_size) {
		/* Since there is no aad, the LAST_GCX bit can be set now. */
aes_a_set_last_gcx_and_adata(aes_dev);
goto exit;
}
/* Adata case. */
/*
* Form the encoding of the Associated data length and write it
* to the AES/SM4 input buffer.
*/
ocs_aes_ccm_write_adata_len(aes_dev, adata_size);
/* Configure the AES/SM4 DMA to fetch the Associated Data */
dma_to_ocs_aes_ll(aes_dev, adata_dma_list);
/* Activate DMA to fetch Associated data. */
aes_a_dma_active_src_ll_en(aes_dev);
/* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
aes_a_set_last_gcx_and_adata(aes_dev);
/* Wait for DMA transfer to complete. */
rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
if (rc)
return rc;
exit:
/* Wait until adata (if present) has been processed. */
aes_a_wait_last_gcx(aes_dev);
aes_a_dma_wait_input_buffer_occupancy(aes_dev);
return 0;
}
static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
dma_addr_t dst_dma_list,
dma_addr_t src_dma_list,
u32 src_size)
{
if (src_size) {
/*
* Configure and activate DMA for both input and output
* data.
*/
dma_to_ocs_aes_ll(aes_dev, src_dma_list);
dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
aes_a_dma_active_src_dst_ll_en(aes_dev);
} else {
/* Configure and activate DMA for output data only. */
dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
aes_a_dma_active_dst_ll_en(aes_dev);
}
/*
* Set the LAST GCX bit in AES_ACTIVE Register to instruct
* AES/SM4 engine to pad the last block of data.
*/
aes_a_set_last_gcx(aes_dev);
/* We are done, wait for IRQ and return. */
return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
}
static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
dma_addr_t dst_dma_list,
dma_addr_t src_dma_list,
u32 src_size)
{
if (!src_size) {
/* Let engine process 0-length input. */
aes_a_dma_set_xfer_size_zero(aes_dev);
aes_a_dma_active(aes_dev);
aes_a_set_last_gcx(aes_dev);
return 0;
}
/*
* Configure and activate DMA for both input and output
* data.
*/
dma_to_ocs_aes_ll(aes_dev, src_dma_list);
dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
aes_a_dma_active_src_dst_ll_en(aes_dev);
/*
* Set the LAST GCX bit in AES_ACTIVE Register; this allows the
* AES/SM4 engine to differentiate between encrypted data and
* encrypted MAC.
*/
aes_a_set_last_gcx(aes_dev);
/*
	 * Enable DMA DONE interrupt; once the DMA transfer is over, the
	 * caller will feed the encrypted MAC/tag to the engine.
*/
return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
}
/*
* Compare Tag to Yr.
*
* Only used at the end of CCM decrypt. If tag == yr, message authentication
* has succeeded.
*/
static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
u8 tag_size_bytes)
{
u32 tag[AES_MAX_TAG_SIZE_U32];
u32 yr[AES_MAX_TAG_SIZE_U32];
u8 i;
/* Read Tag and Yr from AES registers. */
for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
tag[i] = ioread32(aes_dev->base_reg +
AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
yr[i] = ioread32(aes_dev->base_reg +
AES_MULTIPURPOSE2_0_OFFSET +
(i * sizeof(u32)));
}
return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
}
/**
* ocs_aes_ccm_op() - Perform CCM operation.
* @aes_dev: The OCS AES device to use.
* @cipher: The Cipher to use (AES or SM4).
* @instruction: The instruction to perform (encrypt or decrypt).
* @dst_dma_list: The OCS DMA list mapping output memory.
* @src_dma_list: The OCS DMA list mapping input payload data.
* @src_size: The amount of data mapped by @src_dma_list.
* @iv: The input IV vector.
* @adata_dma_list: The OCS DMA list mapping input A-data.
* @adata_size: The amount of data mapped by @adata_dma_list.
* @in_tag: Input tag.
* @tag_size: The size (in bytes) of @in_tag.
*
* Note: for encrypt the tag is appended to the ciphertext (in the memory
* mapped by @dst_dma_list).
*
* Return: 0 on success, negative error code otherwise.
*/
int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
enum ocs_cipher cipher,
enum ocs_instruction instruction,
dma_addr_t dst_dma_list,
dma_addr_t src_dma_list,
u32 src_size,
u8 *iv,
dma_addr_t adata_dma_list,
u32 adata_size,
u8 *in_tag,
u32 tag_size)
{
u32 *iv_32;
u8 lprime;
int rc;
rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
AES_BLOCK_SIZE, adata_dma_list, adata_size,
in_tag, tag_size, cipher, OCS_MODE_CCM,
instruction, dst_dma_list);
if (rc)
return rc;
ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);
	/*
	 * Note: RFC 3610 and NIST SP 800-38C require a counter value of zero
	 * to encrypt the auth tag, so ensure this is the case.
	 */
lprime = iv[L_PRIME_IDX];
memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));
/*
* Nonce is already converted to ctr0 before being passed into this
* function as iv.
*/
iv_32 = (u32 *)iv;
iowrite32(__swab32(iv_32[0]),
aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
iowrite32(__swab32(iv_32[1]),
aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
iowrite32(__swab32(iv_32[2]),
aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
iowrite32(__swab32(iv_32[3]),
aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);
/* Write MAC/tag length in register AES_TLEN */
iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
/*
* Write the byte length of the last AES/SM4 block of Payload data
* (without zero padding and without the length of the MAC) in register
* AES_PLEN.
*/
ocs_aes_write_last_data_blk_len(aes_dev, src_size);
/* Set AES_ACTIVE.TRIGGER to start the operation. */
aes_a_op_trigger(aes_dev);
aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
/* Form block B0 and write it to the AES/SM4 input buffer. */
rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
if (rc)
return rc;
/*
* Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
* clock cycles since TRIGGER bit was set
*/
aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);
/* Process Adata. */
ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
/* For Encrypt case we just process the payload and return. */
if (instruction == OCS_ENCRYPT) {
return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
src_dma_list, src_size);
}
	/* For Decrypt we need to process the payload and then the tag. */
rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
src_dma_list, src_size);
if (rc)
return rc;
/* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
if (rc)
return rc;
return ccm_compare_tag_to_yr(aes_dev, tag_size);
}
/**
* ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
* @aes_dev: The OCS AES device the list will be created for.
* @sg: The SG list OCS DMA linked list will be created from. When
* passed to this function, @sg must have been already mapped
* with dma_map_sg().
* @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
* value returned by dma_map_sg() when @sg was mapped.
* @dll_desc: The OCS DMA dma_list to use to store information about the
* created linked list.
* @data_size: The size of the data (from the SG list) to be mapped into the
* OCS DMA linked list.
* @data_offset: The offset (within the SG list) of the data to be mapped.
*
* Return: 0 on success, negative error code otherwise.
*/
int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
struct scatterlist *sg,
int sg_dma_count,
struct ocs_dll_desc *dll_desc,
size_t data_size, size_t data_offset)
{
struct ocs_dma_linked_list *ll = NULL;
struct scatterlist *sg_tmp;
unsigned int tmp;
int dma_nents;
int i;
if (!dll_desc || !sg || !aes_dev)
return -EINVAL;
	/* Default values for when no dll_desc is created. */
dll_desc->vaddr = NULL;
dll_desc->dma_addr = DMA_MAPPING_ERROR;
dll_desc->size = 0;
if (data_size == 0)
return 0;
/* Loop over sg_list until we reach entry at specified offset. */
while (data_offset >= sg_dma_len(sg)) {
data_offset -= sg_dma_len(sg);
sg_dma_count--;
sg = sg_next(sg);
/* If we reach the end of the list, offset was invalid. */
if (!sg || sg_dma_count == 0)
return -EINVAL;
}
/* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
dma_nents = 0;
tmp = 0;
sg_tmp = sg;
while (tmp < data_offset + data_size) {
/* If we reach the end of the list, data_size was invalid. */
if (!sg_tmp)
return -EINVAL;
tmp += sg_dma_len(sg_tmp);
dma_nents++;
sg_tmp = sg_next(sg_tmp);
}
if (dma_nents > sg_dma_count)
return -EINVAL;
/* Allocate the DMA list, one entry for each SG entry. */
dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
&dll_desc->dma_addr, GFP_KERNEL);
if (!dll_desc->vaddr)
return -ENOMEM;
/* Populate DMA linked list entries. */
ll = dll_desc->vaddr;
for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
ll[i].src_addr = sg_dma_address(sg) + data_offset;
ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
(sg_dma_len(sg) - data_offset) : data_size;
data_offset = 0;
data_size -= ll[i].src_len;
/* Current element points to the DMA address of the next one. */
ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
ll[i].ll_flags = 0;
}
/* Terminate last element. */
ll[i - 1].next = 0;
ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
return 0;
}
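/*
 * Typical usage (sketch only; error handling omitted and the exact calling
 * sequence depends on the caller):
 *
 *	struct ocs_dll_desc dll;
 *	int nents, rc;
 *
 *	nents = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
 *	rc = ocs_create_linked_list_from_sg(aes_dev, sg, nents, &dll,
 *					    data_size, 0);
 *	...
 *	dma_free_coherent(dev, dll.size, dll.vaddr, dll.dma_addr);
 */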
| linux-master | drivers/crypto/intel/keembay/ocs-aes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Keem Bay OCS HCU Crypto Driver.
*
* Copyright (C) 2018-2020 Intel Corporation
*/
#include <crypto/engine.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include "ocs-hcu.h"
#define DRV_NAME "keembay-ocs-hcu"
/* Flag marking a final request. */
#define REQ_FINAL BIT(0)
/* Flag marking a HMAC request. */
#define REQ_FLAGS_HMAC BIT(1)
/* Flag set when HW HMAC is being used. */
#define REQ_FLAGS_HMAC_HW BIT(2)
/* Flag set when SW HMAC is being used. */
#define REQ_FLAGS_HMAC_SW BIT(3)
/**
* struct ocs_hcu_ctx: OCS HCU Transform context.
* @hcu_dev: The OCS HCU device used by the transformation.
* @key: The key (used only for HMAC transformations).
* @key_len: The length of the key.
* @is_sm3_tfm: Whether or not this is an SM3 transformation.
* @is_hmac_tfm: Whether or not this is a HMAC transformation.
*/
struct ocs_hcu_ctx {
struct ocs_hcu_dev *hcu_dev;
u8 key[SHA512_BLOCK_SIZE];
size_t key_len;
bool is_sm3_tfm;
bool is_hmac_tfm;
};
/**
* struct ocs_hcu_rctx - Context for the request.
* @hcu_dev: OCS HCU device to be used to service the request.
* @flags: Flags tracking request status.
* @algo: Algorithm to use for the request.
* @blk_sz: Block size of the transformation / request.
* @dig_sz: Digest size of the transformation / request.
* @dma_list: OCS DMA linked list.
* @hash_ctx: OCS HCU hashing context.
* @buffer: Buffer to store: partial block of data and SW HMAC
* artifacts (ipad, opad, etc.).
* @buf_cnt: Number of bytes currently stored in the buffer.
* @buf_dma_addr: The DMA address of @buffer (when mapped).
* @buf_dma_count: The number of bytes in @buffer currently DMA-mapped.
* @sg: Head of the scatterlist entries containing data.
* @sg_data_total: Total data in the SG list at any time.
* @sg_data_offset: Offset into the data of the current individual SG node.
* @sg_dma_nents: Number of sg entries mapped in dma_list.
*/
struct ocs_hcu_rctx {
struct ocs_hcu_dev *hcu_dev;
u32 flags;
enum ocs_hcu_algo algo;
size_t blk_sz;
size_t dig_sz;
struct ocs_hcu_dma_list *dma_list;
struct ocs_hcu_hash_ctx hash_ctx;
/*
* Buffer is double the block size because we need space for SW HMAC
* artifacts, i.e:
* - ipad (1 block) + a possible partial block of data.
* - opad (1 block) + digest of H(k ^ ipad || m)
*/
u8 buffer[2 * SHA512_BLOCK_SIZE];
size_t buf_cnt;
dma_addr_t buf_dma_addr;
size_t buf_dma_count;
struct scatterlist *sg;
unsigned int sg_data_total;
unsigned int sg_data_offset;
unsigned int sg_dma_nents;
};
/**
* struct ocs_hcu_drv - Driver data
* @dev_list: The list of HCU devices.
* @lock: The lock protecting dev_list.
*/
struct ocs_hcu_drv {
struct list_head dev_list;
spinlock_t lock; /* Protects dev_list. */
};
static struct ocs_hcu_drv ocs_hcu = {
.dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
.lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
};
/*
* Return the total amount of data in the request; that is: the data in the
* request buffer + the data in the sg list.
*/
static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
{
return rctx->sg_data_total + rctx->buf_cnt;
}
/* Move remaining content of scatter-gather list to context buffer. */
static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
{
size_t count;
if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
WARN(1, "%s: sg data does not fit in buffer\n", __func__);
return -EINVAL;
}
while (rctx->sg_data_total) {
if (!rctx->sg) {
WARN(1, "%s: unexpected NULL sg\n", __func__);
return -EINVAL;
}
/*
* If current sg has been fully processed, skip to the next
* one.
*/
if (rctx->sg_data_offset == rctx->sg->length) {
rctx->sg = sg_next(rctx->sg);
rctx->sg_data_offset = 0;
continue;
}
/*
* Determine the maximum data available to copy from the node.
* Minimum of the length left in the sg node, or the total data
* in the request.
*/
count = min(rctx->sg->length - rctx->sg_data_offset,
rctx->sg_data_total);
/* Copy from scatter-list entry to context buffer. */
scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
rctx->sg, rctx->sg_data_offset,
count, 0);
rctx->sg_data_offset += count;
rctx->sg_data_total -= count;
rctx->buf_cnt += count;
}
return 0;
}
static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
/* If the HCU device for the request was previously set, return it. */
if (tctx->hcu_dev)
return tctx->hcu_dev;
/*
* Otherwise, get the first HCU device available (there should be one
* and only one device).
*/
spin_lock_bh(&ocs_hcu.lock);
tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
struct ocs_hcu_dev,
list);
spin_unlock_bh(&ocs_hcu.lock);
return tctx->hcu_dev;
}
/* Free OCS DMA linked list and DMA-able context buffer. */
static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
struct ocs_hcu_rctx *rctx)
{
struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
struct device *dev = hcu_dev->dev;
/* Unmap rctx->buffer (if mapped). */
if (rctx->buf_dma_count) {
dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
DMA_TO_DEVICE);
rctx->buf_dma_count = 0;
}
/* Unmap req->src (if mapped). */
if (rctx->sg_dma_nents) {
dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
rctx->sg_dma_nents = 0;
}
/* Free dma_list (if allocated). */
if (rctx->dma_list) {
ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
rctx->dma_list = NULL;
}
}
/*
* Prepare for DMA operation:
* - DMA-map request context buffer (if needed)
* - DMA-map SG list (only the entries to be processed, see note below)
* - Allocate OCS HCU DMA linked list (number of elements = SG entries to
* process + context buffer (if not empty)).
* - Add DMA-mapped request context buffer to OCS HCU DMA list.
* - Add SG entries to DMA list.
*
* Note: if this is a final request, we process all the data in the SG list,
* otherwise we can only process up to the maximum amount of block-aligned data
* (the remainder will be put into the context buffer and processed in the next
* request).
*/
static int kmb_ocs_dma_prepare(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
size_t nents;
size_t count;
int rc;
int i;
/* This function should be called only when there is data to process. */
total = kmb_get_total_data(rctx);
if (!total)
return -EINVAL;
/*
* If this is not a final DMA (terminated DMA), the data passed to the
* HCU must be aligned to the block size; compute the remainder data to
* be processed in the next request.
*/
if (!(rctx->flags & REQ_FINAL))
remainder = total % rctx->blk_sz;
/* Determine the number of scatter gather list entries to process. */
nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
/* If there are entries to process, map them. */
if (nents) {
rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
DMA_TO_DEVICE);
if (!rctx->sg_dma_nents) {
dev_err(dev, "Failed to MAP SG\n");
rc = -ENOMEM;
goto cleanup;
}
/*
* The value returned by dma_map_sg() can be < nents; so update
* nents accordingly.
*/
nents = rctx->sg_dma_nents;
}
/*
* If context buffer is not empty, map it and add extra DMA entry for
* it.
*/
if (rctx->buf_cnt) {
rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
rctx->buf_cnt,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
dev_err(dev, "Failed to map request context buffer\n");
rc = -ENOMEM;
goto cleanup;
}
rctx->buf_dma_count = rctx->buf_cnt;
/* Increase number of dma entries. */
nents++;
}
/* Allocate OCS HCU DMA list. */
rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
if (!rctx->dma_list) {
rc = -ENOMEM;
goto cleanup;
}
/* Add request context buffer (if previously DMA-mapped) */
if (rctx->buf_dma_count) {
rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
rctx->buf_dma_addr,
rctx->buf_dma_count);
if (rc)
goto cleanup;
}
/* Add the SG nodes to be processed to the DMA linked list. */
for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
/*
* The number of bytes to add to the list entry is the minimum
* between:
* - The DMA length of the SG entry.
* - The data left to be processed.
*/
count = min(rctx->sg_data_total - remainder,
sg_dma_len(rctx->sg) - rctx->sg_data_offset);
/*
* Do not create a zero length DMA descriptor. Check in case of
* zero length SG node.
*/
if (count == 0)
continue;
/* Add sg to HCU DMA list. */
rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
rctx->dma_list,
rctx->sg->dma_address,
count);
if (rc)
goto cleanup;
/* Update amount of data remaining in SG list. */
rctx->sg_data_total -= count;
/*
* If remaining data is equal to remainder (note: 'less than'
* case should never happen in practice), we are done: update
* offset and exit the loop.
*/
if (rctx->sg_data_total <= remainder) {
WARN_ON(rctx->sg_data_total < remainder);
rctx->sg_data_offset += count;
break;
}
		/*
		 * If we get here, it is because we need to process the next
		 * sg in the list; set offset within the sg to 0.
		 */
rctx->sg_data_offset = 0;
}
return 0;
cleanup:
dev_err(dev, "Failed to prepare DMA.\n");
kmb_ocs_hcu_dma_cleanup(req, rctx);
return rc;
}
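/*
 * Worked example (non-final sha256 update, 64-byte block size): with 12
 * bytes already buffered and 100 bytes in the SG list, total == 112 and
 * remainder == 112 % 64 == 48; the DMA list then covers 64 bytes (the
 * 12-byte buffer entry plus 52 bytes from the SG list) and the remaining
 * 48 bytes stay in the SG list, to be flushed into the context buffer and
 * processed with the next request.
 */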
static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
/* Clear buffer of any data. */
memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
}
static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
{
struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
if (!hcu_dev)
return -ENOENT;
return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
}
static int prepare_ipad(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
int i;
WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
"%s: HMAC_SW flag is not set\n", __func__);
/*
* Key length must be equal to block size. If key is shorter,
* we pad it with zero (note: key cannot be longer, since
* longer keys are hashed by kmb_ocs_hcu_setkey()).
*/
if (ctx->key_len > rctx->blk_sz) {
WARN(1, "%s: Invalid key length in tfm context\n", __func__);
return -EINVAL;
}
memzero_explicit(&ctx->key[ctx->key_len],
rctx->blk_sz - ctx->key_len);
ctx->key_len = rctx->blk_sz;
/*
* Prepare IPAD for HMAC. Only done for first block.
* HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
* k ^ ipad will be first hashed block.
* k ^ opad will be calculated in the final request.
* Only needed if not using HW HMAC.
*/
for (i = 0; i < rctx->blk_sz; i++)
rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
rctx->buf_cnt = rctx->blk_sz;
return 0;
}
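/*
 * For example, for hmac(sha256) with a 20-byte key: the key is zero-padded
 * to the 64-byte block size and buffer[0..63] is set to key[i] ^ 0x36
 * (HMAC_IPAD_VALUE), ready to be hashed as the first block.
 */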
static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = container_of(areq, struct ahash_request,
base);
struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
int rc;
int i;
if (!hcu_dev) {
rc = -ENOENT;
goto error;
}
/*
* If hardware HMAC flag is set, perform HMAC in hardware.
*
* NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
*/
if (rctx->flags & REQ_FLAGS_HMAC_HW) {
/* Map input data into the HCU DMA linked list. */
rc = kmb_ocs_dma_prepare(req);
if (rc)
goto error;
rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
rctx->dma_list, req->result, rctx->dig_sz);
/* Unmap data and free DMA list regardless of return code. */
kmb_ocs_hcu_dma_cleanup(req, rctx);
/* Process previous return code. */
if (rc)
goto error;
goto done;
}
/* Handle update request case. */
if (!(rctx->flags & REQ_FINAL)) {
/* Update should always have input data. */
if (!kmb_get_total_data(rctx))
return -EINVAL;
/* Map input data into the HCU DMA linked list. */
rc = kmb_ocs_dma_prepare(req);
if (rc)
goto error;
/* Do hashing step. */
rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
rctx->dma_list);
/* Unmap data and free DMA list regardless of return code. */
kmb_ocs_hcu_dma_cleanup(req, rctx);
/* Process previous return code. */
if (rc)
goto error;
/*
* Reset request buffer count (data in the buffer was just
* processed).
*/
rctx->buf_cnt = 0;
/*
* Move remaining sg data into the request buffer, so that it
* will be processed during the next request.
*
* NOTE: we have remaining data if kmb_get_total_data() was not
* a multiple of block size.
*/
rc = flush_sg_to_ocs_buffer(rctx);
if (rc)
goto error;
goto done;
}
/* If we get here, this is a final request. */
/* If there is data to process, use finup. */
if (kmb_get_total_data(rctx)) {
/* Map input data into the HCU DMA linked list. */
rc = kmb_ocs_dma_prepare(req);
if (rc)
goto error;
/* Do hashing step. */
rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
rctx->dma_list,
req->result, rctx->dig_sz);
/* Free DMA list regardless of return code. */
kmb_ocs_hcu_dma_cleanup(req, rctx);
/* Process previous return code. */
if (rc)
goto error;
} else { /* Otherwise (if we have no data), use final. */
rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
rctx->dig_sz);
if (rc)
goto error;
}
/*
* If we are finalizing a SW HMAC request, we just computed the result
* of: H(k ^ ipad || m).
*
* We now need to complete the HMAC calculation with the OPAD step,
* that is, we need to compute H(k ^ opad || digest), where digest is
* the digest we just obtained, i.e., H(k ^ ipad || m).
*/
if (rctx->flags & REQ_FLAGS_HMAC_SW) {
/*
* Compute k ^ opad and store it in the request buffer (which
* is not used anymore at this point).
		 * Note: the key has already been padded / hashed (so keylen ==
		 * blksz).
*/
WARN_ON(tctx->key_len != rctx->blk_sz);
for (i = 0; i < rctx->blk_sz; i++)
rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
/* Now append the digest to the rest of the buffer. */
for (i = 0; (i < rctx->dig_sz); i++)
rctx->buffer[rctx->blk_sz + i] = req->result[i];
/* Now hash the buffer to obtain the final HMAC. */
rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
rctx->blk_sz + rctx->dig_sz, req->result,
rctx->dig_sz);
if (rc)
goto error;
}
/* Perform secure clean-up. */
kmb_ocs_hcu_secure_cleanup(req);
done:
crypto_finalize_hash_request(hcu_dev->engine, req, 0);
return 0;
error:
kmb_ocs_hcu_secure_cleanup(req);
return rc;
}
static int kmb_ocs_hcu_init(struct ahash_request *req)
{
struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
if (!hcu_dev)
return -ENOENT;
/* Initialize entire request context to zero. */
memset(rctx, 0, sizeof(*rctx));
rctx->hcu_dev = hcu_dev;
rctx->dig_sz = crypto_ahash_digestsize(tfm);
switch (rctx->dig_sz) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
case SHA224_DIGEST_SIZE:
rctx->blk_sz = SHA224_BLOCK_SIZE;
rctx->algo = OCS_HCU_ALGO_SHA224;
break;
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
case SHA256_DIGEST_SIZE:
rctx->blk_sz = SHA256_BLOCK_SIZE;
/*
* SHA256 and SM3 have the same digest size: use info from tfm
* context to find out which one we should use.
*/
rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
OCS_HCU_ALGO_SHA256;
break;
case SHA384_DIGEST_SIZE:
rctx->blk_sz = SHA384_BLOCK_SIZE;
rctx->algo = OCS_HCU_ALGO_SHA384;
break;
case SHA512_DIGEST_SIZE:
rctx->blk_sz = SHA512_BLOCK_SIZE;
rctx->algo = OCS_HCU_ALGO_SHA512;
break;
default:
return -EINVAL;
}
/* Initialize intermediate data. */
ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
/* If this a HMAC request, set HMAC flag. */
if (ctx->is_hmac_tfm)
rctx->flags |= REQ_FLAGS_HMAC;
return 0;
}
static int kmb_ocs_hcu_update(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
int rc;
if (!req->nbytes)
return 0;
rctx->sg_data_total = req->nbytes;
rctx->sg_data_offset = 0;
rctx->sg = req->src;
/*
* If we are doing HMAC, then we must use SW-assisted HMAC, since HW
	 * HMAC does not support context switching (in HW, HMAC can only be
	 * used with finup() or digest()).
*/
if (rctx->flags & REQ_FLAGS_HMAC &&
!(rctx->flags & REQ_FLAGS_HMAC_SW)) {
rctx->flags |= REQ_FLAGS_HMAC_SW;
rc = prepare_ipad(req);
if (rc)
return rc;
}
/*
* If remaining sg_data fits into ctx buffer, just copy it there; we'll
* process it at the next update() or final().
*/
if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
return flush_sg_to_ocs_buffer(rctx);
return kmb_ocs_hcu_handle_queue(req);
}
/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
int rc;
rctx->flags |= REQ_FINAL;
/*
* If this is a HMAC request and, so far, we didn't have to switch to
* SW HMAC, check if we can use HW HMAC.
*/
if (rctx->flags & REQ_FLAGS_HMAC &&
!(rctx->flags & REQ_FLAGS_HMAC_SW)) {
/*
* If we are here, it means we never processed any data so far,
* so we can use HW HMAC, but only if there is some data to
* process (since OCS HW MAC does not support zero-length
* messages) and the key length is supported by the hardware
* (OCS HCU HW only supports length <= 64); if HW HMAC cannot
* be used, fall back to SW-assisted HMAC.
*/
if (kmb_get_total_data(rctx) &&
ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
rctx->flags |= REQ_FLAGS_HMAC_HW;
} else {
rctx->flags |= REQ_FLAGS_HMAC_SW;
rc = prepare_ipad(req);
if (rc)
return rc;
}
}
return kmb_ocs_hcu_handle_queue(req);
}
static int kmb_ocs_hcu_final(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
rctx->sg_data_total = 0;
rctx->sg_data_offset = 0;
rctx->sg = NULL;
return kmb_ocs_hcu_fin_common(req);
}
static int kmb_ocs_hcu_finup(struct ahash_request *req)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
rctx->sg_data_total = req->nbytes;
rctx->sg_data_offset = 0;
rctx->sg = req->src;
return kmb_ocs_hcu_fin_common(req);
}
static int kmb_ocs_hcu_digest(struct ahash_request *req)
{
int rc = 0;
struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
if (!hcu_dev)
return -ENOENT;
rc = kmb_ocs_hcu_init(req);
if (rc)
return rc;
rc = kmb_ocs_hcu_finup(req);
return rc;
}
static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
/* Intermediate data is always stored and applied per request. */
memcpy(out, rctx, sizeof(*rctx));
return 0;
}
static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
{
struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
/* Intermediate data is always stored and applied per request. */
memcpy(rctx, in, sizeof(*rctx));
return 0;
}
static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned int digestsize = crypto_ahash_digestsize(tfm);
struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
size_t blk_sz = crypto_ahash_blocksize(tfm);
struct crypto_ahash *ahash_tfm;
struct ahash_request *req;
struct crypto_wait wait;
struct scatterlist sg;
const char *alg_name;
int rc;
/*
* Key length must be equal to block size:
* - If key is shorter, we are done for now (the key will be padded
* later on); this is to maximize the use of HW HMAC (which works
* only for keys <= 64 bytes).
* - If key is longer, we hash it.
*/
if (keylen <= blk_sz) {
memcpy(ctx->key, key, keylen);
ctx->key_len = keylen;
return 0;
}
switch (digestsize) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
case SHA224_DIGEST_SIZE:
alg_name = "sha224-keembay-ocs";
break;
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
case SHA256_DIGEST_SIZE:
alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
"sha256-keembay-ocs";
break;
case SHA384_DIGEST_SIZE:
alg_name = "sha384-keembay-ocs";
break;
case SHA512_DIGEST_SIZE:
alg_name = "sha512-keembay-ocs";
break;
default:
return -EINVAL;
}
ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
if (IS_ERR(ahash_tfm))
return PTR_ERR(ahash_tfm);
req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
if (!req) {
rc = -ENOMEM;
goto err_free_ahash;
}
crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
crypto_ahash_clear_flags(ahash_tfm, ~0);
sg_init_one(&sg, key, keylen);
ahash_request_set_crypt(req, &sg, ctx->key, keylen);
rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (rc == 0)
ctx->key_len = digestsize;
ahash_request_free(req);
err_free_ahash:
crypto_free_ahash(ahash_tfm);
return rc;
}
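/*
 * For example, for hmac(sha256) a 100-byte key is first hashed down to a
 * 32-byte key using "sha256-keembay-ocs", while a 48-byte key is stored
 * as-is (and only zero-padded to the block size later, if SW HMAC is used).
 */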
/* Set request size and initialize tfm context. */
static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
sizeof(struct ocs_hcu_rctx));
}
static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
{
struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
__cra_init(tfm, ctx);
return 0;
}
static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
{
struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
__cra_init(tfm, ctx);
ctx->is_sm3_tfm = true;
return 0;
}
static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
{
struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
__cra_init(tfm, ctx);
ctx->is_sm3_tfm = true;
ctx->is_hmac_tfm = true;
return 0;
}
static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
{
struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
__cra_init(tfm, ctx);
ctx->is_hmac_tfm = true;
return 0;
}
/* Function called when 'tfm' is de-initialized. */
static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
/* Clear the key. */
memzero_explicit(ctx->key, sizeof(ctx->key));
}
static struct ahash_engine_alg ocs_hcu_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.setkey = kmb_ocs_hcu_setkey,
.base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.setkey = kmb_ocs_hcu_setkey,
.base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sm3_cra_init,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.setkey = kmb_ocs_hcu_setkey,
.base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "hmac(sm3)",
.cra_driver_name = "hmac-sm3-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.setkey = kmb_ocs_hcu_setkey,
.base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
.base.init = kmb_ocs_hcu_init,
.base.update = kmb_ocs_hcu_update,
.base.final = kmb_ocs_hcu_final,
.base.finup = kmb_ocs_hcu_finup,
.base.digest = kmb_ocs_hcu_digest,
.base.export = kmb_ocs_hcu_export,
.base.import = kmb_ocs_hcu_import,
.base.setkey = kmb_ocs_hcu_setkey,
.base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-keembay-ocs",
.cra_priority = 255,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ocs_hcu_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
},
.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
};
/* Device tree driver match. */
static const struct of_device_id kmb_ocs_hcu_of_match[] = {
{
.compatible = "intel,keembay-ocs-hcu",
},
{}
};
static int kmb_ocs_hcu_remove(struct platform_device *pdev)
{
struct ocs_hcu_dev *hcu_dev;
int rc;
hcu_dev = platform_get_drvdata(pdev);
if (!hcu_dev)
return -ENODEV;
crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
rc = crypto_engine_exit(hcu_dev->engine);
spin_lock_bh(&ocs_hcu.lock);
list_del(&hcu_dev->list);
spin_unlock_bh(&ocs_hcu.lock);
return rc;
}
static int kmb_ocs_hcu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocs_hcu_dev *hcu_dev;
int rc;
hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
if (!hcu_dev)
return -ENOMEM;
hcu_dev->dev = dev;
platform_set_drvdata(pdev, hcu_dev);
rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
if (rc)
return rc;
hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hcu_dev->io_base))
return PTR_ERR(hcu_dev->io_base);
init_completion(&hcu_dev->irq_done);
/* Get and request IRQ. */
hcu_dev->irq = platform_get_irq(pdev, 0);
if (hcu_dev->irq < 0)
return hcu_dev->irq;
rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
ocs_hcu_irq_handler, NULL, 0,
"keembay-ocs-hcu", hcu_dev);
if (rc < 0) {
dev_err(dev, "Could not request IRQ.\n");
return rc;
}
INIT_LIST_HEAD(&hcu_dev->list);
spin_lock_bh(&ocs_hcu.lock);
list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
spin_unlock_bh(&ocs_hcu.lock);
/* Initialize crypto engine */
hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
if (!hcu_dev->engine) {
rc = -ENOMEM;
goto list_del;
}
rc = crypto_engine_start(hcu_dev->engine);
if (rc) {
dev_err(dev, "Could not start engine.\n");
goto cleanup;
}
/* Security infrastructure guarantees OCS clock is enabled. */
rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
if (rc) {
dev_err(dev, "Could not register algorithms.\n");
goto cleanup;
}
return 0;
cleanup:
crypto_engine_exit(hcu_dev->engine);
list_del:
spin_lock_bh(&ocs_hcu.lock);
list_del(&hcu_dev->list);
spin_unlock_bh(&ocs_hcu.lock);
return rc;
}
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
.probe = kmb_ocs_hcu_probe,
.remove = kmb_ocs_hcu_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_hcu_of_match,
},
};
module_platform_driver(kmb_ocs_hcu_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Keem Bay OCS ECC Crypto Driver.
*
* Copyright (C) 2019-2021 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/engine.h>
#include <crypto/internal/ecc.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/rng.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#define DRV_NAME "keembay-ocs-ecc"
#define KMB_OCS_ECC_PRIORITY 350
#define HW_OFFS_OCS_ECC_COMMAND 0x00000000
#define HW_OFFS_OCS_ECC_STATUS 0x00000004
#define HW_OFFS_OCS_ECC_DATA_IN 0x00000080
#define HW_OFFS_OCS_ECC_CX_DATA_OUT 0x00000100
#define HW_OFFS_OCS_ECC_CY_DATA_OUT 0x00000180
#define HW_OFFS_OCS_ECC_ISR 0x00000400
#define HW_OFFS_OCS_ECC_IER 0x00000404
#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0)
#define HW_OCS_ECC_COMMAND_INS_BP BIT(0)
#define HW_OCS_ECC_COMMAND_START_VAL BIT(0)
#define OCS_ECC_OP_SIZE_384 BIT(8)
#define OCS_ECC_OP_SIZE_256 0
/* ECC Instruction : for ECC_COMMAND */
#define OCS_ECC_INST_WRITE_AX (0x1 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_WRITE_AY (0x2 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_WRITE_BX_D (0x3 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_WRITE_BY_L (0x4 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_WRITE_P (0x5 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_WRITE_A (0x6 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_CALC_D_IDX_A (0x8 << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP)
#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP)
#define ECC_ENABLE_INTR 1
#define POLL_USEC 100
#define TIMEOUT_USEC 10000
#define KMB_ECC_VLI_MAX_DIGITS ECC_CURVE_NIST_P384_DIGITS
#define KMB_ECC_VLI_MAX_BYTES (KMB_ECC_VLI_MAX_DIGITS \
<< ECC_DIGITS_TO_BYTES_SHIFT)
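/* Exponent used (with OCS_ECC_INST_CALC_A_POW_B_MODP) to compute x^3 during public-key validation. */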
#define POW_CUBE 3
/**
* struct ocs_ecc_dev - ECC device context
* @list: List of device contexts
* @dev: OCS ECC device
* @base_reg: IO base address of OCS ECC
* @engine: Crypto engine for the device
* @irq_done: IRQ done completion.
* @irq: IRQ number
*/
struct ocs_ecc_dev {
struct list_head list;
struct device *dev;
void __iomem *base_reg;
struct crypto_engine *engine;
struct completion irq_done;
int irq;
};
/**
* struct ocs_ecc_ctx - Transformation context.
* @ecc_dev: The ECC driver associated with this context.
* @curve: The elliptic curve used by this transformation.
* @private_key: The private key.
*/
struct ocs_ecc_ctx {
struct ocs_ecc_dev *ecc_dev;
const struct ecc_curve *curve;
u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
};
/* Driver data. */
struct ocs_ecc_drv {
struct list_head dev_list;
spinlock_t lock; /* Protects dev_list. */
};
/* Global variable holding the list of OCS ECC devices (only one expected). */
static struct ocs_ecc_drv ocs_ecc = {
.dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
.lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
};
/* Get OCS ECC tfm context from kpp_request. */
static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
{
return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
}
/* Converts number of digits to number of bytes. */
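/* Each VLI digit is a u64, hence the left shift by ECC_DIGITS_TO_BYTES_SHIFT. */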
static inline unsigned int digits_to_bytes(unsigned int n)
{
return n << ECC_DIGITS_TO_BYTES_SHIFT;
}
/*
 * Wait for the ECC engine to be idle, i.e., for an operation (other than a
 * write operation) to complete.
*/
static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
{
u32 value;
return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
value,
!(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
POLL_USEC, TIMEOUT_USEC);
}
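/* Send the ecc_start pulse to the engine, also indicating the operation size (256 or 384 bit). */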
static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
{
iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
}
/* Direct write of u32 buffer to ECC engine with associated instruction. */
static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
u32 op_size,
u32 inst,
const void *data_in,
size_t data_size)
{
iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
/* MMIO Write src uint32 to dst. */
memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
data_size);
}
/* Start OCS ECC operation and wait for its completion. */
static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
u32 inst)
{
reinit_completion(&ecc_dev->irq_done);
iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
return wait_for_completion_interruptible(&ecc_dev->irq_done);
}
/**
* ocs_ecc_read_cx_out() - Read the CX data output buffer.
* @dev: The OCS ECC device to read from.
* @cx_out: The buffer where to store the CX value. Must be at least
* @byte_count byte long.
* @byte_count: The amount of data to read.
*/
static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
size_t byte_count)
{
memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
byte_count);
}
/**
 * ocs_ecc_read_cy_out() - Read the CY data output buffer.
* @dev: The OCS ECC device to read from.
* @cy_out: The buffer where to store the CY value. Must be at least
* @byte_count byte long.
* @byte_count: The amount of data to read.
*/
static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out,
size_t byte_count)
{
memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT,
byte_count);
}
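/* Return the OCS ECC device to use for this tfm context, assigning one on first use. */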
static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx)
{
if (tctx->ecc_dev)
return tctx->ecc_dev;
spin_lock(&ocs_ecc.lock);
/* Only a single OCS device available. */
tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev,
list);
spin_unlock(&ocs_ecc.lock);
return tctx->ecc_dev;
}
/* Do point multiplication using OCS ECC HW. */
static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
struct ecc_point *result,
const struct ecc_point *point,
u64 *scalar,
const struct ecc_curve *curve)
{
u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */
u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
size_t nbytes = digits_to_bytes(curve->g.ndigits);
int rc = 0;
	/* Generate nbytes of random data for Simple and Differential SCA protection. */
rc = crypto_get_default_rng();
if (rc)
return rc;
rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
crypto_put_default_rng();
if (rc)
return rc;
	/* Wait for the engine to be idle before starting a new operation. */
rc = ocs_ecc_wait_idle(ecc_dev);
if (rc)
return rc;
/* Send ecc_start pulse as well as indicating operation size. */
ocs_ecc_cmd_start(ecc_dev, op_size);
/* Write ax param; Base point (Gx). */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
point->x, nbytes);
/* Write ay param; Base point (Gy). */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
point->y, nbytes);
/*
* Write the private key into DATA_IN reg.
*
	 * Since the DATA_IN register is used to write different values during
	 * the computation, the private key value is subsequently overwritten
	 * with the side-channel-resistance value.
*/
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D,
scalar, nbytes);
/* Write operand by/l. */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L,
sca, nbytes);
memzero_explicit(sca, sizeof(sca));
/* Write p = curve prime(GF modulus). */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
curve->p, nbytes);
/* Write a = curve coefficient. */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A,
curve->a, nbytes);
/* Make hardware perform the multiplication. */
rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A);
if (rc)
return rc;
/* Read result. */
ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes);
ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes);
return 0;
}
/**
* kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW.
* @ecc_dev: The OCS ECC device to use.
* @scalar_out: Where to store the output scalar.
* @scalar_a: Input scalar operand 'a'.
* @scalar_b: Input scalar operand 'b'
* @curve: The curve on which the operation is performed.
* @ndigits: The size of the operands (in digits).
* @inst: The operation to perform (as an OCS ECC instruction).
*
* Return: 0 on success, negative error code otherwise.
*/
static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out,
const u64 *scalar_a, const u64 *scalar_b,
const struct ecc_curve *curve,
unsigned int ndigits, const u32 inst)
{
u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
size_t nbytes = digits_to_bytes(ndigits);
int rc;
	/* Wait for the engine to be idle before starting a new operation. */
rc = ocs_ecc_wait_idle(ecc_dev);
if (rc)
return rc;
/* Send ecc_start pulse as well as indicating operation size. */
ocs_ecc_cmd_start(ecc_dev, op_size);
	/* Write scalar_a as the first operand (WRITE_AX instruction). */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
scalar_a, nbytes);
	/* Write scalar_b as the second operand (WRITE_AY instruction). */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
scalar_b, nbytes);
	/* Write p = curve prime(GF modulus). */
ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
curve->p, nbytes);
/* Give instruction A.B or A+B to ECC engine. */
rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst);
if (rc)
return rc;
ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes);
if (vli_is_zero(scalar_out, ndigits))
return -EINVAL;
return 0;
}
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev,
const struct ecc_curve *curve,
struct ecc_point *pk)
{
u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
int rc;
if (WARN_ON(pk->ndigits != curve->g.ndigits))
return -EINVAL;
/* Check 1: Verify key is not the zero point. */
if (ecc_point_is_zero(pk))
return -EINVAL;
/* Check 2: Verify key is in the range [0, p-1]. */
if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
return -EINVAL;
if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
return -EINVAL;
/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
/* y^2 */
/* Compute y^2 -> store in yy */
rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
OCS_ECC_INST_CALC_A_MUL_B_MODP);
if (rc)
goto exit;
/* x^3 */
/* Assigning w = 3, used for calculating x^3. */
w[0] = POW_CUBE;
	/* Compute x^3 -> store in xxx. */
rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits,
OCS_ECC_INST_CALC_A_POW_B_MODP);
if (rc)
goto exit;
/* Do a*x -> store in w. */
rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve,
pk->ndigits,
OCS_ECC_INST_CALC_A_MUL_B_MODP);
if (rc)
goto exit;
/* Do ax + b == w + b; store in w. */
rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve,
pk->ndigits,
OCS_ECC_INST_CALC_A_ADD_B_MODP);
if (rc)
goto exit;
/* x^3 + ax + b == x^3 + w -> store in w. */
rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits,
OCS_ECC_INST_CALC_A_ADD_B_MODP);
if (rc)
goto exit;
/* Compare y^2 == x^3 + a·x + b. */
rc = vli_cmp(yy, w, pk->ndigits);
if (rc)
rc = -EINVAL;
exit:
memzero_explicit(xxx, sizeof(xxx));
memzero_explicit(yy, sizeof(yy));
memzero_explicit(w, sizeof(w));
return rc;
}
/* SP800-56A section 5.6.2.3.3 full verification */
static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev,
const struct ecc_curve *curve,
struct ecc_point *pk)
{
struct ecc_point *nQ;
int rc;
/* Checks 1 through 3 */
rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
if (rc)
return rc;
/* Check 4: Verify that nQ is the zero point. */
nQ = ecc_alloc_point(pk->ndigits);
if (!nQ)
return -ENOMEM;
rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
if (rc)
goto exit;
if (!ecc_point_is_zero(nQ))
rc = -EINVAL;
exit:
ecc_free_point(nQ);
return rc;
}
static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
const u64 *private_key, size_t private_key_len)
{
size_t ndigits = curve->g.ndigits;
u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
u64 res[KMB_ECC_VLI_MAX_DIGITS];
if (private_key_len != digits_to_bytes(ndigits))
return -EINVAL;
if (!private_key)
return -EINVAL;
/* Make sure the private key is in the range [2, n-3]. */
if (vli_cmp(one, private_key, ndigits) != -1)
return -EINVAL;
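	/* Compute res = n - 2 and require private_key < n - 2 (i.e. private_key <= n - 3). */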
vli_sub(res, curve->n, one, ndigits);
vli_sub(res, res, one, ndigits);
if (vli_cmp(res, private_key, ndigits) != 1)
return -EINVAL;
return 0;
}
/*
* ECC private keys are generated using the method of extra random bits,
* equivalent to that described in FIPS 186-4, Appendix B.4.1.
*
 * d = (c mod(n-1)) + 1 where c is a string of random bits, 64 bits longer
* than requested
* 0 <= c mod(n-1) <= n-2 and implies that
* 1 <= d <= n-1
*
* This method generates a private key uniformly distributed in the range
* [1, n-1].
*/
static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
{
size_t nbytes = digits_to_bytes(curve->g.ndigits);
u64 priv[KMB_ECC_VLI_MAX_DIGITS];
size_t nbits;
int rc;
nbits = vli_num_bits(curve->n, curve->g.ndigits);
/* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
return -EINVAL;
/*
* FIPS 186-4 recommends that the private key should be obtained from a
* RBG with a security strength equal to or greater than the security
* strength associated with N.
*
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
* This condition is met by the default RNG because it selects a favored
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
return -EFAULT;
rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
if (rc)
goto cleanup;
rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
if (rc)
goto cleanup;
ecc_swap_digits(priv, privkey, curve->g.ndigits);
cleanup:
memzero_explicit(&priv, sizeof(priv));
return rc;
}
static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
struct ecdh params;
int rc = 0;
rc = crypto_ecdh_decode_key(buf, len, ¶ms);
if (rc)
goto cleanup;
	/* Ensure the key size is not bigger than expected. */
if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
rc = -EINVAL;
goto cleanup;
}
	/* Auto-generate the private key if one is not provided. */
if (!params.key || !params.key_size) {
rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
goto cleanup;
}
rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
params.key_size);
if (rc)
goto cleanup;
ecc_swap_digits((const u64 *)params.key, tctx->private_key,
tctx->curve->g.ndigits);
cleanup:
memzero_explicit(¶ms, sizeof(params));
if (rc)
tctx->curve = NULL;
return rc;
}
/* Compute shared secret. */
static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx,
struct kpp_request *req)
{
struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
const struct ecc_curve *curve = tctx->curve;
u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS];
u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
size_t copied, nbytes, pubk_len;
struct ecc_point *pk, *result;
int rc;
nbytes = digits_to_bytes(curve->g.ndigits);
/* Public key is a point, thus it has two coordinates */
pubk_len = 2 * nbytes;
/* Copy public key from SG list to pubk_buf. */
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src, pubk_len),
pubk_buf, pubk_len);
if (copied != pubk_len)
return -EINVAL;
/* Allocate and initialize public key point. */
pk = ecc_alloc_point(curve->g.ndigits);
if (!pk)
return -ENOMEM;
ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits);
ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits);
/*
* Check the public key for following
* Check 1: Verify key is not the zero point.
* Check 2: Verify key is in the range [1, p-1].
* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p
*/
rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
if (rc)
goto exit_free_pk;
/* Allocate point for storing computed shared secret. */
result = ecc_alloc_point(pk->ndigits);
if (!result) {
rc = -ENOMEM;
goto exit_free_pk;
}
/* Calculate the shared secret.*/
rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve);
if (rc)
goto exit_free_result;
if (ecc_point_is_zero(result)) {
rc = -EFAULT;
goto exit_free_result;
}
/* Copy shared secret from point to buffer. */
ecc_swap_digits(result->x, shared_secret, result->ndigits);
/* Request might ask for less bytes than what we have. */
nbytes = min_t(size_t, nbytes, req->dst_len);
copied = sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, nbytes),
shared_secret, nbytes);
if (copied != nbytes)
rc = -EINVAL;
memzero_explicit(shared_secret, sizeof(shared_secret));
exit_free_result:
ecc_free_point(result);
exit_free_pk:
ecc_free_point(pk);
return rc;
}
/* Compute public key. */
static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx,
struct kpp_request *req)
{
const struct ecc_curve *curve = tctx->curve;
u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
struct ecc_point *pk;
size_t pubk_len;
size_t copied;
int rc;
/* Public key is a point, so it has double the digits. */
pubk_len = 2 * digits_to_bytes(curve->g.ndigits);
pk = ecc_alloc_point(curve->g.ndigits);
if (!pk)
return -ENOMEM;
/* Public Key(pk) = priv * G. */
rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key,
curve);
if (rc)
goto exit;
/* SP800-56A rev 3 5.6.2.1.3 key check */
if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) {
rc = -EAGAIN;
goto exit;
}
/* Copy public key from point to buffer. */
ecc_swap_digits(pk->x, pubk_buf, pk->ndigits);
ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits);
/* Copy public key to req->dst. */
copied = sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, pubk_len),
pubk_buf, pubk_len);
if (copied != pubk_len)
rc = -EINVAL;
exit:
ecc_free_point(pk);
return rc;
}
static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine,
void *areq)
{
struct kpp_request *req = container_of(areq, struct kpp_request, base);
struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
int rc;
if (req->src)
rc = kmb_ecc_do_shared_secret(tctx, req);
else
rc = kmb_ecc_do_public_key(tctx, req);
crypto_finalize_kpp_request(ecc_dev->engine, req, rc);
return 0;
}
static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req)
{
struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
const struct ecc_curve *curve = tctx->curve;
/* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
if (!tctx->curve)
return -EINVAL;
/* Ensure dst is present. */
if (!req->dst)
return -EINVAL;
/* Check the request dst is big enough to hold the public key. */
if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits)))
return -EINVAL;
/* 'src' is not supposed to be present when generate pubk is called. */
if (req->src)
return -EINVAL;
return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
req);
}
static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req)
{
struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
const struct ecc_curve *curve = tctx->curve;
/* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
if (!tctx->curve)
return -EINVAL;
/* Ensure dst is present. */
if (!req->dst)
return -EINVAL;
/* Ensure src is present. */
if (!req->src)
return -EINVAL;
/*
* req->src is expected to the (other-side) public key, so its length
* must be 2 * coordinate size (in bytes).
*/
if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits))
return -EINVAL;
return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
req);
}
static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
{
memset(tctx, 0, sizeof(*tctx));
tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx);
if (IS_ERR(tctx->ecc_dev)) {
pr_err("Failed to find the device : %ld\n",
PTR_ERR(tctx->ecc_dev));
return PTR_ERR(tctx->ecc_dev);
}
tctx->curve = ecc_get_curve(curve_id);
if (!tctx->curve)
return -EOPNOTSUPP;
return 0;
}
static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256);
}
static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384);
}
static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
memzero_explicit(tctx->private_key, sizeof(*tctx->private_key));
}
static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
{
struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
	/* The public key is made of two coordinates, so it is twice the coordinate size. */
return digits_to_bytes(tctx->curve->g.ndigits) * 2;
}
static struct kpp_engine_alg ocs_ecdh_p256 = {
.base.set_secret = kmb_ocs_ecdh_set_secret,
.base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
.base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
.base.init = kmb_ocs_ecdh_nist_p256_init_tfm,
.base.exit = kmb_ocs_ecdh_exit_tfm,
.base.max_size = kmb_ocs_ecdh_max_size,
.base.base = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "ecdh-nist-p256-keembay-ocs",
.cra_priority = KMB_OCS_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ocs_ecc_ctx),
},
.op.do_one_request = kmb_ocs_ecc_do_one_request,
};
static struct kpp_engine_alg ocs_ecdh_p384 = {
.base.set_secret = kmb_ocs_ecdh_set_secret,
.base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
.base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
.base.init = kmb_ocs_ecdh_nist_p384_init_tfm,
.base.exit = kmb_ocs_ecdh_exit_tfm,
.base.max_size = kmb_ocs_ecdh_max_size,
.base.base = {
.cra_name = "ecdh-nist-p384",
.cra_driver_name = "ecdh-nist-p384-keembay-ocs",
.cra_priority = KMB_OCS_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ocs_ecc_ctx),
},
.op.do_one_request = kmb_ocs_ecc_do_one_request,
};
static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
{
struct ocs_ecc_dev *ecc_dev = dev_id;
u32 status;
/*
* Read the status register and write it back to clear the
* DONE_INT_STATUS bit.
*/
status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE))
return IRQ_NONE;
complete(&ecc_dev->irq_done);
return IRQ_HANDLED;
}
static int kmb_ocs_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocs_ecc_dev *ecc_dev;
int rc;
ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL);
if (!ecc_dev)
return -ENOMEM;
ecc_dev->dev = dev;
platform_set_drvdata(pdev, ecc_dev);
INIT_LIST_HEAD(&ecc_dev->list);
init_completion(&ecc_dev->irq_done);
/* Get base register address. */
ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ecc_dev->base_reg)) {
dev_err(dev, "Failed to get base address\n");
rc = PTR_ERR(ecc_dev->base_reg);
goto list_del;
}
/* Get and request IRQ */
ecc_dev->irq = platform_get_irq(pdev, 0);
if (ecc_dev->irq < 0) {
rc = ecc_dev->irq;
goto list_del;
}
rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler,
NULL, 0, "keembay-ocs-ecc", ecc_dev);
if (rc < 0) {
dev_err(dev, "Could not request IRQ\n");
goto list_del;
}
/* Add device to the list of OCS ECC devices. */
spin_lock(&ocs_ecc.lock);
list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list);
spin_unlock(&ocs_ecc.lock);
/* Initialize crypto engine. */
ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
if (!ecc_dev->engine) {
dev_err(dev, "Could not allocate crypto engine\n");
rc = -ENOMEM;
goto list_del;
}
rc = crypto_engine_start(ecc_dev->engine);
if (rc) {
dev_err(dev, "Could not start crypto engine\n");
goto cleanup;
}
/* Register the KPP algo. */
rc = crypto_engine_register_kpp(&ocs_ecdh_p256);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
goto cleanup;
}
rc = crypto_engine_register_kpp(&ocs_ecdh_p384);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
goto ocs_ecdh_p384_error;
}
return 0;
ocs_ecdh_p384_error:
crypto_engine_unregister_kpp(&ocs_ecdh_p256);
cleanup:
crypto_engine_exit(ecc_dev->engine);
list_del:
spin_lock(&ocs_ecc.lock);
list_del(&ecc_dev->list);
spin_unlock(&ocs_ecc.lock);
return rc;
}
static int kmb_ocs_ecc_remove(struct platform_device *pdev)
{
struct ocs_ecc_dev *ecc_dev;
ecc_dev = platform_get_drvdata(pdev);
crypto_engine_unregister_kpp(&ocs_ecdh_p384);
crypto_engine_unregister_kpp(&ocs_ecdh_p256);
spin_lock(&ocs_ecc.lock);
list_del(&ecc_dev->list);
spin_unlock(&ocs_ecc.lock);
crypto_engine_exit(ecc_dev->engine);
return 0;
}
/* Device tree driver match. */
static const struct of_device_id kmb_ocs_ecc_of_match[] = {
{
.compatible = "intel,keembay-ocs-ecc",
},
{}
};
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_ecc_driver = {
.probe = kmb_ocs_ecc_probe,
.remove = kmb_ocs_ecc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_ecc_of_match,
},
};
module_platform_driver(kmb_ocs_ecc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver");
MODULE_ALIAS_CRYPTO("ecdh-nist-p256");
MODULE_ALIAS_CRYPTO("ecdh-nist-p384");
MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs");
MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs");
| linux-master | drivers/crypto/intel/keembay/keembay-ocs-ecc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Keem Bay OCS AES Crypto Driver.
*
* Copyright (C) 2018-2020 Intel Corporation
*/
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include "ocs-aes.h"
#define KMB_OCS_PRIORITY 350
#define DRV_NAME "keembay-ocs-aes"
#define OCS_AES_MIN_KEY_SIZE 16
#define OCS_AES_MAX_KEY_SIZE 32
#define OCS_AES_KEYSIZE_128 16
#define OCS_AES_KEYSIZE_192 24
#define OCS_AES_KEYSIZE_256 32
#define OCS_SM4_KEY_SIZE 16
/**
* struct ocs_aes_tctx - OCS AES Transform context
* @aes_dev: The OCS AES device.
* @key: AES/SM4 key.
* @key_len: The length (in bytes) of @key.
* @cipher: OCS cipher to use (either AES or SM4).
* @sw_cipher: The cipher to use as fallback.
* @use_fallback: Whether or not fallback cipher should be used.
*/
struct ocs_aes_tctx {
struct ocs_aes_dev *aes_dev;
u8 key[OCS_AES_KEYSIZE_256];
unsigned int key_len;
enum ocs_cipher cipher;
union {
struct crypto_sync_skcipher *sk;
struct crypto_aead *aead;
} sw_cipher;
bool use_fallback;
};
/**
* struct ocs_aes_rctx - OCS AES Request context.
* @instruction: Instruction to be executed (encrypt / decrypt).
 * @mode: Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
* @src_nents: Number of source SG entries.
* @dst_nents: Number of destination SG entries.
* @src_dma_count: The number of DMA-mapped entries of the source SG.
* @dst_dma_count: The number of DMA-mapped entries of the destination SG.
* @in_place: Whether or not this is an in place request, i.e.,
* src_sg == dst_sg.
* @src_dll: OCS DMA linked list for input data.
* @dst_dll: OCS DMA linked list for output data.
* @last_ct_blk: Buffer to hold last cipher text block (only used in CBC
* mode).
* @cts_swap: Whether or not CTS swap must be performed.
* @aad_src_dll: OCS DMA linked list for input AAD data.
* @aad_dst_dll: OCS DMA linked list for output AAD data.
* @in_tag: Buffer to hold input encrypted tag (only used for
* CCM/GCM decrypt).
* @out_tag: Buffer to hold output encrypted / decrypted tag (only
* used for GCM encrypt / decrypt).
*/
struct ocs_aes_rctx {
/* Fields common across all modes. */
enum ocs_instruction instruction;
enum ocs_mode mode;
int src_nents;
int dst_nents;
int src_dma_count;
int dst_dma_count;
bool in_place;
struct ocs_dll_desc src_dll;
struct ocs_dll_desc dst_dll;
/* CBC specific */
u8 last_ct_blk[AES_BLOCK_SIZE];
/* CTS specific */
int cts_swap;
/* CCM/GCM specific */
struct ocs_dll_desc aad_src_dll;
struct ocs_dll_desc aad_dst_dll;
u8 in_tag[AES_BLOCK_SIZE];
/* GCM specific */
u8 out_tag[AES_BLOCK_SIZE];
};
/* Driver data. */
struct ocs_aes_drv {
struct list_head dev_list;
spinlock_t lock; /* Protects dev_list. */
};
static struct ocs_aes_drv ocs_aes = {
.dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
.lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
};
static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
{
struct ocs_aes_dev *aes_dev;
spin_lock(&ocs_aes.lock);
if (tctx->aes_dev) {
aes_dev = tctx->aes_dev;
goto exit;
}
/* Only a single OCS device available */
aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
tctx->aes_dev = aes_dev;
exit:
spin_unlock(&ocs_aes.lock);
return aes_dev;
}
/*
* Ensure key is 128-bit or 256-bit for AES or 128-bit for SM4 and an actual
* key is being passed in.
*
* Return: 0 if key is valid, -EINVAL otherwise.
*/
static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
{
if (!in_key)
return -EINVAL;
	/* For AES, only 128-bit and 256-bit keys are supported. */
if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
key_len == OCS_AES_KEYSIZE_256))
return 0;
	/* For SM4, only 128-bit keys are supported. */
if (cipher == OCS_SM4 && key_len == OCS_AES_KEYSIZE_128)
return 0;
/* Everything else is unsupported. */
return -EINVAL;
}
/* Save key into transformation context. */
static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
enum ocs_cipher cipher)
{
int ret;
ret = check_key(in_key, key_len, cipher);
if (ret)
return ret;
memcpy(tctx->key, in_key, key_len);
tctx->key_len = key_len;
tctx->cipher = cipher;
return 0;
}
/* Set key for symmetric cipher. */
static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
size_t key_len, enum ocs_cipher cipher)
{
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
/* Fallback is used for AES with 192-bit key. */
tctx->use_fallback = (cipher == OCS_AES &&
key_len == OCS_AES_KEYSIZE_192);
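	/* check_key() only accepts 128-bit and 256-bit AES keys, hence the software fallback for 192-bit. */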
if (!tctx->use_fallback)
return save_key(tctx, in_key, key_len, cipher);
crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
}
/* Set key for AEAD cipher. */
static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
size_t key_len, enum ocs_cipher cipher)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
/* Fallback is used for AES with 192-bit key. */
tctx->use_fallback = (cipher == OCS_AES &&
key_len == OCS_AES_KEYSIZE_192);
if (!tctx->use_fallback)
return save_key(tctx, in_key, key_len, cipher);
crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(tctx->sw_cipher.aead,
crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
}
/* Swap two AES blocks in SG lists. */
static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
off_t blk1_offset, off_t blk2_offset)
{
u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];
/*
* No easy way to copy within sg list, so copy both blocks to temporary
* buffers first.
*/
sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
}
/* Initialize request context to default values. */
static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
{
/* Zero everything. */
memset(rctx, 0, sizeof(*rctx));
/* Set initial value for DMA addresses. */
rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
}
static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
enum ocs_mode mode)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
int iv_size = crypto_skcipher_ivsize(tfm);
switch (mode) {
case OCS_MODE_ECB:
/* Ensure input length is multiple of block size */
if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
return 0;
case OCS_MODE_CBC:
/* Ensure input length is multiple of block size */
if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
/* Ensure IV is present and block size in length */
if (!req->iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
/*
* NOTE: Since req->cryptlen == 0 case was already handled in
* kmb_ocs_sk_common(), the above two conditions also guarantee
* that: cryptlen >= iv_size
*/
return 0;
case OCS_MODE_CTR:
/* Ensure IV is present and block size in length */
if (!req->iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
return 0;
case OCS_MODE_CTS:
/* Ensure input length >= block size */
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
/* Ensure IV is present and block size in length */
if (!req->iv || iv_size != AES_BLOCK_SIZE)
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
/*
* Called by encrypt() / decrypt() skcipher functions.
*
* Use fallback if needed, otherwise initialize context and enqueue request
* into engine.
*/
static int kmb_ocs_sk_common(struct skcipher_request *req,
enum ocs_cipher cipher,
enum ocs_instruction instruction,
enum ocs_mode mode)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
struct ocs_aes_dev *aes_dev;
int rc;
if (tctx->use_fallback) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);
skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen, req->iv);
if (instruction == OCS_ENCRYPT)
rc = crypto_skcipher_encrypt(subreq);
else
rc = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return rc;
}
/*
* If cryptlen == 0, no processing needed for ECB, CBC and CTR.
*
* For CTS continue: kmb_ocs_sk_validate_input() will return -EINVAL.
*/
if (!req->cryptlen && mode != OCS_MODE_CTS)
return 0;
rc = kmb_ocs_sk_validate_input(req, mode);
if (rc)
return rc;
aes_dev = kmb_ocs_aes_find_dev(tctx);
if (!aes_dev)
return -ENODEV;
if (cipher != tctx->cipher)
return -EINVAL;
ocs_aes_init_rctx(rctx);
rctx->instruction = instruction;
rctx->mode = mode;
return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
}
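/* Free the DMA-coherent buffer backing an OCS DMA linked list descriptor. */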
static void cleanup_ocs_dma_linked_list(struct device *dev,
struct ocs_dll_desc *dll)
{
if (dll->vaddr)
dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
dll->vaddr = NULL;
dll->size = 0;
dll->dma_addr = DMA_MAPPING_ERROR;
}
static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
struct device *dev = tctx->aes_dev->dev;
if (rctx->src_dma_count) {
dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
rctx->src_dma_count = 0;
}
if (rctx->dst_dma_count) {
dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
DMA_BIDIRECTIONAL :
DMA_FROM_DEVICE);
rctx->dst_dma_count = 0;
}
/* Clean up OCS DMA linked lists */
cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
}
static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
int iv_size = crypto_skcipher_ivsize(tfm);
int rc;
/*
* For CBC decrypt, save last block (iv) to last_ct_blk buffer.
*
* Note: if we are here, we already checked that cryptlen >= iv_size
* and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
* kmb_ocs_sk_validate_input().
*/
if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
req->cryptlen - iv_size, iv_size, 0);
/* For CTS decrypt, swap last two blocks, if needed. */
if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
sg_swap_blocks(req->dst, rctx->dst_nents,
req->cryptlen - AES_BLOCK_SIZE,
req->cryptlen - (2 * AES_BLOCK_SIZE));
/* src and dst buffers are the same, use bidirectional DMA mapping. */
rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
rctx->dst_nents, DMA_BIDIRECTIONAL);
if (rctx->dst_dma_count == 0) {
dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
return -ENOMEM;
}
/* Create DST linked list */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
rctx->dst_dma_count, &rctx->dst_dll,
req->cryptlen, 0);
if (rc)
return rc;
/*
* If descriptor creation was successful, set the src_dll.dma_addr to
* the value of dst_dll.dma_addr, as we do in-place AES operation on
* the src.
*/
rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
return 0;
}
static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
int rc;
rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (rctx->src_nents < 0)
return -EBADMSG;
/* Map SRC SG. */
rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
rctx->src_nents, DMA_TO_DEVICE);
if (rctx->src_dma_count == 0) {
dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
return -ENOMEM;
}
/* Create SRC linked list */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
rctx->src_dma_count, &rctx->src_dll,
req->cryptlen, 0);
if (rc)
return rc;
/* Map DST SG. */
rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
rctx->dst_nents, DMA_FROM_DEVICE);
if (rctx->dst_dma_count == 0) {
dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
return -ENOMEM;
}
/* Create DST linked list */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
rctx->dst_dma_count, &rctx->dst_dll,
req->cryptlen, 0);
if (rc)
return rc;
/* If this is not a CTS decrypt operation with swapping, we are done. */
if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
return 0;
/*
* Otherwise, we have to copy src to dst (as we cannot modify src).
* Use OCS AES bypass mode to copy src to dst via DMA.
*
* NOTE: for anything other than small data sizes this is rather
* inefficient.
*/
rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
rctx->src_dll.dma_addr, req->cryptlen);
if (rc)
return rc;
/*
* Now dst == src, so clean up what we did so far and use in_place
* logic.
*/
kmb_ocs_sk_dma_cleanup(req);
rctx->in_place = true;
return kmb_ocs_sk_prepare_inplace(req);
}
static int kmb_ocs_sk_run(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
struct ocs_aes_dev *aes_dev = tctx->aes_dev;
int iv_size = crypto_skcipher_ivsize(tfm);
int rc;
rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (rctx->dst_nents < 0)
return -EBADMSG;
/*
	 * If the length is two blocks or greater and a multiple of the block
	 * size, swap the last two blocks to be compatible with other crypto
	 * API CTS implementations:
* OCS mode uses CBC-CS2, whereas other crypto API implementations use
* CBC-CS3.
* CBC-CS2 and CBC-CS3 defined by:
* https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
*/
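	/*
	 * For lengths that are an exact multiple of the block size, CBC-CS2
	 * and CBC-CS3 differ only in the order of the last two ciphertext
	 * blocks, so the conversion is a single swap of those blocks.
	 */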
rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
req->cryptlen > AES_BLOCK_SIZE &&
req->cryptlen % AES_BLOCK_SIZE == 0);
rctx->in_place = (req->src == req->dst);
if (rctx->in_place)
rc = kmb_ocs_sk_prepare_inplace(req);
else
rc = kmb_ocs_sk_prepare_notinplace(req);
if (rc)
goto error;
rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
req->cryptlen, req->iv, iv_size);
if (rc)
goto error;
/* Clean-up DMA before further processing output. */
kmb_ocs_sk_dma_cleanup(req);
/* For CTS Encrypt, swap last 2 blocks, if needed. */
if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
sg_swap_blocks(req->dst, rctx->dst_nents,
req->cryptlen - AES_BLOCK_SIZE,
req->cryptlen - (2 * AES_BLOCK_SIZE));
return 0;
}
/* For CBC copy IV to req->IV. */
if (rctx->mode == OCS_MODE_CBC) {
/* CBC encrypt case. */
if (rctx->instruction == OCS_ENCRYPT) {
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - iv_size,
iv_size, 0);
return 0;
}
/* CBC decrypt case. */
if (rctx->in_place)
memcpy(req->iv, rctx->last_ct_blk, iv_size);
else
scatterwalk_map_and_copy(req->iv, req->src,
req->cryptlen - iv_size,
iv_size, 0);
return 0;
}
/* For all other modes there's nothing to do. */
return 0;
error:
kmb_ocs_sk_dma_cleanup(req);
return rc;
}
static int kmb_ocs_aead_validate_input(struct aead_request *req,
enum ocs_instruction instruction,
enum ocs_mode mode)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int tag_size = crypto_aead_authsize(tfm);
int iv_size = crypto_aead_ivsize(tfm);
	/* For decrypt, cryptlen == len(PT) + len(tag). */
if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
return -EINVAL;
/* IV is mandatory. */
if (!req->iv)
return -EINVAL;
switch (mode) {
case OCS_MODE_GCM:
if (iv_size != GCM_AES_IV_SIZE)
return -EINVAL;
return 0;
case OCS_MODE_CCM:
/* Ensure IV is present and block size in length */
if (iv_size != AES_BLOCK_SIZE)
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
/*
* Called by encrypt() / decrypt() aead functions.
*
* Use fallback if needed, otherwise initialize context and enqueue request
* into engine.
*/
static int kmb_ocs_aead_common(struct aead_request *req,
enum ocs_cipher cipher,
enum ocs_instruction instruction,
enum ocs_mode mode)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct ocs_aes_rctx *rctx = aead_request_ctx(req);
struct ocs_aes_dev *dd;
int rc;
if (tctx->use_fallback) {
struct aead_request *subreq = aead_request_ctx(req);
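		/* The request context is sized in ocs_aes_aead_cra_init() to be large enough for this sub-request. */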
aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen, req->iv);
aead_request_set_ad(subreq, req->assoclen);
rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
crypto_aead_authsize(crypto_aead_reqtfm(req)));
if (rc)
return rc;
return (instruction == OCS_ENCRYPT) ?
crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
}
rc = kmb_ocs_aead_validate_input(req, instruction, mode);
if (rc)
return rc;
dd = kmb_ocs_aes_find_dev(tctx);
if (!dd)
return -ENODEV;
if (cipher != tctx->cipher)
return -EINVAL;
ocs_aes_init_rctx(rctx);
rctx->instruction = instruction;
rctx->mode = mode;
return crypto_transfer_aead_request_to_engine(dd->engine, req);
}
static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct ocs_aes_rctx *rctx = aead_request_ctx(req);
struct device *dev = tctx->aes_dev->dev;
if (rctx->src_dma_count) {
dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
rctx->src_dma_count = 0;
}
if (rctx->dst_dma_count) {
dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
DMA_BIDIRECTIONAL :
DMA_FROM_DEVICE);
rctx->dst_dma_count = 0;
}
/* Clean up OCS DMA linked lists */
cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
}
/**
* kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
* @req: The AEAD request being processed.
* @src_dll_size: Where to store the length of the data mapped into the
* src_dll OCS DMA list.
*
* Do the following:
* - DMA map req->src and req->dst
* - Initialize the following OCS DMA linked lists: rctx->src_dll,
* rctx->dst_dll, rctx->aad_src_dll and rxtc->aad_dst_dll.
*
* Return: 0 on success, negative error code otherwise.
*/
static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
struct ocs_aes_rctx *rctx = aead_request_ctx(req);
u32 in_size; /* The length of the data to be mapped by src_dll. */
u32 out_size; /* The length of the data to be mapped by dst_dll. */
u32 dst_size; /* The length of the data in dst_sg. */
int rc;
/* Get number of entries in input data SG list. */
rctx->src_nents = sg_nents_for_len(req->src,
req->assoclen + req->cryptlen);
if (rctx->src_nents < 0)
return -EBADMSG;
if (rctx->instruction == OCS_DECRYPT) {
/*
* For decrypt:
* - src sg list is: AAD|CT|tag
* - dst sg list expects: AAD|PT
*
* in_size == len(CT); out_size == len(PT)
*/
/* req->cryptlen includes both CT and tag. */
in_size = req->cryptlen - tag_size;
/* out_size = PT size == CT size */
out_size = in_size;
/* len(dst_sg) == len(AAD) + len(PT) */
dst_size = req->assoclen + out_size;
/*
* Copy tag from source SG list to 'in_tag' buffer.
*
* Note: this needs to be done here, before DMA mapping src_sg.
*/
sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
tag_size, req->assoclen + in_size);
} else { /* OCS_ENCRYPT */
/*
* For encrypt:
* src sg list is: AAD|PT
* dst sg list expects: AAD|CT|tag
*/
/* in_size == len(PT) */
in_size = req->cryptlen;
/*
* In CCM mode the OCS engine appends the tag to the ciphertext,
* but in GCM mode the tag must be read from the tag registers
* and appended manually below
*/
out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
in_size;
/* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
dst_size = req->assoclen + in_size + tag_size;
}
*src_dll_size = in_size;
/* Get number of entries in output data SG list. */
rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
if (rctx->dst_nents < 0)
return -EBADMSG;
rctx->in_place = (req->src == req->dst) ? 1 : 0;
/* Map destination; use bidirectional mapping for in-place case. */
rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
rctx->dst_nents,
rctx->in_place ? DMA_BIDIRECTIONAL :
DMA_FROM_DEVICE);
if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
return -ENOMEM;
}
/* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
rctx->dst_dma_count,
&rctx->aad_dst_dll, req->assoclen,
0);
if (rc)
return rc;
/* Create DST list: maps dst[AAD_SIZE:out_size] */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
rctx->dst_dma_count, &rctx->dst_dll,
out_size, req->assoclen);
if (rc)
return rc;
if (rctx->in_place) {
/* If this is not CCM encrypt, we are done. */
if (!(rctx->mode == OCS_MODE_CCM &&
rctx->instruction == OCS_ENCRYPT)) {
/*
* SRC and DST are the same, so re-use the same DMA
* addresses (to avoid allocating new DMA lists
* identical to the dst ones).
*/
rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;
return 0;
}
/*
* For CCM encrypt the input and output linked lists contain
* different amounts of data, so, we need to create different
* SRC and AAD SRC lists, even for the in-place case.
*/
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
rctx->dst_dma_count,
&rctx->aad_src_dll,
req->assoclen, 0);
if (rc)
return rc;
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
rctx->dst_dma_count,
&rctx->src_dll, in_size,
req->assoclen);
if (rc)
return rc;
return 0;
}
/* Not in-place case. */
/* Map source SG. */
rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
rctx->src_nents, DMA_TO_DEVICE);
if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
return -ENOMEM;
}
/* Create AAD SRC list. */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
rctx->src_dma_count,
&rctx->aad_src_dll,
req->assoclen, 0);
if (rc)
return rc;
/* Create SRC list. */
rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
rctx->src_dma_count,
&rctx->src_dll, in_size,
req->assoclen);
if (rc)
return rc;
if (req->assoclen == 0)
return 0;
/* Copy AAD from src sg to dst sg using OCS DMA. */
rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
rctx->aad_src_dll.dma_addr, req->cryptlen);
if (rc)
dev_err(tctx->aes_dev->dev,
"Failed to copy source AAD to destination AAD\n");
return rc;
}
static int kmb_ocs_aead_run(struct aead_request *req)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
struct ocs_aes_rctx *rctx = aead_request_ctx(req);
u32 in_size; /* The length of the data mapped by src_dll. */
int rc;
rc = kmb_ocs_aead_dma_prepare(req, &in_size);
if (rc)
goto exit;
/* For CCM, we just call the OCS processing and we are done. */
if (rctx->mode == OCS_MODE_CCM) {
rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
rctx->instruction, rctx->dst_dll.dma_addr,
rctx->src_dll.dma_addr, in_size,
req->iv,
rctx->aad_src_dll.dma_addr, req->assoclen,
rctx->in_tag, tag_size);
goto exit;
}
/* GCM case; invoke OCS processing. */
rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
rctx->instruction,
rctx->dst_dll.dma_addr,
rctx->src_dll.dma_addr, in_size,
req->iv,
rctx->aad_src_dll.dma_addr, req->assoclen,
rctx->out_tag, tag_size);
if (rc)
goto exit;
/* For GCM decrypt, we have to compare in_tag with out_tag. */
if (rctx->instruction == OCS_DECRYPT) {
rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
-EBADMSG : 0;
goto exit;
}
/* For GCM encrypt, we must manually copy out_tag to DST sg. */
/* Clean-up must be called before the sg_pcopy_from_buffer() below. */
kmb_ocs_aead_dma_cleanup(req);
/* Copy tag to destination sg after AAD and CT. */
sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
tag_size, req->assoclen + req->cryptlen);
/* Return directly as DMA cleanup already done. */
return 0;
exit:
kmb_ocs_aead_dma_cleanup(req);
return rc;
}
static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
void *areq)
{
struct skcipher_request *req =
container_of(areq, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
int err;
if (!tctx->aes_dev) {
err = -ENODEV;
goto exit;
}
err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
tctx->cipher);
if (err)
goto exit;
err = kmb_ocs_sk_run(req);
exit:
crypto_finalize_skcipher_request(engine, req, err);
return 0;
}
static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
void *areq)
{
struct aead_request *req = container_of(areq,
struct aead_request, base);
struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
int err;
if (!tctx->aes_dev)
return -ENODEV;
err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
tctx->cipher);
if (err)
goto exit;
err = kmb_ocs_aead_run(req);
exit:
crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);
return 0;
}
static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
}
static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
unsigned int key_len)
{
return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
}
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
}
static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
}
static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
}
static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
}
static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
}
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
}
static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
}
static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
}
static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
}
static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
}
static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
}
static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
unsigned int key_len)
{
return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
}
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
}
static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
}
static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
}
static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
}
static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
}
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
}
static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
{
return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
}
static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
}
static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
}
static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
{
return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
}
static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
/* set fallback cipher in case it will be needed */
blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
tctx->sw_cipher.sk = blk;
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
return 0;
}
static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
{
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
return 0;
}
static inline void clear_key(struct ocs_aes_tctx *tctx)
{
memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);
/* Zero key registers if set */
if (tctx->aes_dev)
ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
tctx->key, OCS_AES);
}
static void ocs_exit_tfm(struct crypto_skcipher *tfm)
{
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
clear_key(tctx);
if (tctx->sw_cipher.sk) {
crypto_free_sync_skcipher(tctx->sw_cipher.sk);
tctx->sw_cipher.sk = NULL;
}
}
static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
struct crypto_aead *blk;
/* Set fallback cipher in case it will be needed */
blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
tctx->sw_cipher.aead = blk;
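	/*
	 * The request context must be able to hold either the HW request
	 * context or a nested request for the fallback cipher, so size it
	 * as the maximum of the two.
	 */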
crypto_aead_set_reqsize(tfm,
max(sizeof(struct ocs_aes_rctx),
(sizeof(struct aead_request) +
crypto_aead_reqsize(tctx->sw_cipher.aead))));
return 0;
}
static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
switch (authsize) {
case 4:
case 6:
case 8:
case 10:
case 12:
case 14:
case 16:
return 0;
default:
return -EINVAL;
}
}
static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return crypto_gcm_check_authsize(authsize);
}
static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
{
crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
return 0;
}
static void ocs_aead_cra_exit(struct crypto_aead *tfm)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
clear_key(tctx);
if (tctx->sw_cipher.aead) {
crypto_free_aead(tctx->sw_cipher.aead);
tctx->sw_cipher.aead = NULL;
}
}
static struct skcipher_engine_alg algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
.base.base.cra_name = "ecb(aes)",
.base.base.cra_driver_name = "ecb-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_ecb_encrypt,
.base.decrypt = kmb_ocs_aes_ecb_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
.base.base.cra_name = "cbc(aes)",
.base.base.cra_driver_name = "cbc-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_cbc_encrypt,
.base.decrypt = kmb_ocs_aes_cbc_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
.base.base.cra_name = "ctr(aes)",
.base.base.cra_driver_name = "ctr-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = 1,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_ctr_encrypt,
.base.decrypt = kmb_ocs_aes_ctr_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
.base.base.cra_name = "cts(cbc(aes))",
.base.base.cra_driver_name = "cts-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_cts_encrypt,
.base.decrypt = kmb_ocs_aes_cts_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
.base.base.cra_name = "ecb(sm4)",
.base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_ecb_encrypt,
.base.decrypt = kmb_ocs_sm4_ecb_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
.base.base.cra_name = "cbc(sm4)",
.base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_cbc_encrypt,
.base.decrypt = kmb_ocs_sm4_cbc_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
.base.base.cra_name = "ctr(sm4)",
.base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = 1,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_ctr_encrypt,
.base.decrypt = kmb_ocs_sm4_ctr_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
.base.base.cra_name = "cts(cbc(sm4))",
.base.base.cra_driver_name = "cts-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_cts_encrypt,
.base.decrypt = kmb_ocs_sm4_cts_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
};
static struct aead_engine_alg algs_aead[] = {
{
.base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.base.init = ocs_aes_aead_cra_init,
.base.exit = ocs_aead_cra_exit,
.base.ivsize = GCM_AES_IV_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
.base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
.base.setkey = kmb_ocs_aes_aead_set_key,
.base.encrypt = kmb_ocs_aes_gcm_encrypt,
.base.decrypt = kmb_ocs_aes_gcm_decrypt,
.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
.base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.base.init = ocs_aes_aead_cra_init,
.base.exit = ocs_aead_cra_exit,
.base.ivsize = AES_BLOCK_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
.base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
.base.setkey = kmb_ocs_aes_aead_set_key,
.base.encrypt = kmb_ocs_aes_ccm_encrypt,
.base.decrypt = kmb_ocs_aes_ccm_decrypt,
.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
.base.base = {
.cra_name = "gcm(sm4)",
.cra_driver_name = "gcm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.base.init = ocs_sm4_aead_cra_init,
.base.exit = ocs_aead_cra_exit,
.base.ivsize = GCM_AES_IV_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
.base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
.base.setkey = kmb_ocs_sm4_aead_set_key,
.base.encrypt = kmb_ocs_sm4_gcm_encrypt,
.base.decrypt = kmb_ocs_sm4_gcm_decrypt,
.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
.base.base = {
.cra_name = "ccm(sm4)",
.cra_driver_name = "ccm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.base.init = ocs_sm4_aead_cra_init,
.base.exit = ocs_aead_cra_exit,
.base.ivsize = AES_BLOCK_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
.base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
.base.setkey = kmb_ocs_sm4_aead_set_key,
.base.encrypt = kmb_ocs_sm4_ccm_encrypt,
.base.decrypt = kmb_ocs_sm4_ccm_decrypt,
.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
}
};
static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
{
crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
static int register_aes_algs(struct ocs_aes_dev *aes_dev)
{
int ret;
/*
* If any algorithm fails to register, all preceding algorithms that
* were successfully registered will be automatically unregistered.
*/
ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
if (ret)
return ret;
ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
if (ret)
		crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
return ret;
}
/* Device tree driver match. */
static const struct of_device_id kmb_ocs_aes_of_match[] = {
{
.compatible = "intel,keembay-ocs-aes",
},
{}
};
static int kmb_ocs_aes_remove(struct platform_device *pdev)
{
struct ocs_aes_dev *aes_dev;
aes_dev = platform_get_drvdata(pdev);
unregister_aes_algs(aes_dev);
spin_lock(&ocs_aes.lock);
list_del(&aes_dev->list);
spin_unlock(&ocs_aes.lock);
crypto_engine_exit(aes_dev->engine);
return 0;
}
static int kmb_ocs_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocs_aes_dev *aes_dev;
int rc;
aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
if (!aes_dev)
return -ENOMEM;
aes_dev->dev = dev;
platform_set_drvdata(pdev, aes_dev);
rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
return rc;
}
/* Get base register address. */
aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aes_dev->base_reg))
return PTR_ERR(aes_dev->base_reg);
/* Get and request IRQ */
aes_dev->irq = platform_get_irq(pdev, 0);
if (aes_dev->irq < 0)
return aes_dev->irq;
rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
NULL, 0, "keembay-ocs-aes", aes_dev);
if (rc < 0) {
dev_err(dev, "Could not request IRQ\n");
return rc;
}
INIT_LIST_HEAD(&aes_dev->list);
spin_lock(&ocs_aes.lock);
list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
spin_unlock(&ocs_aes.lock);
init_completion(&aes_dev->irq_completion);
/* Initialize crypto engine */
aes_dev->engine = crypto_engine_alloc_init(dev, true);
if (!aes_dev->engine) {
rc = -ENOMEM;
goto list_del;
}
rc = crypto_engine_start(aes_dev->engine);
if (rc) {
dev_err(dev, "Could not start crypto engine\n");
goto cleanup;
}
rc = register_aes_algs(aes_dev);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
goto cleanup;
}
return 0;
cleanup:
crypto_engine_exit(aes_dev->engine);
list_del:
spin_lock(&ocs_aes.lock);
list_del(&aes_dev->list);
spin_unlock(&ocs_aes.lock);
return rc;
}
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_aes_driver = {
.probe = kmb_ocs_aes_probe,
.remove = kmb_ocs_aes_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_aes_of_match,
},
};
module_platform_driver(kmb_ocs_aes_driver);
MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
| linux-master | drivers/crypto/intel/keembay/keembay-ocs-aes-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel IXP4xx NPE-C crypto driver
*
* Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/of.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
/* Intermittent includes, delete this after v5.14-rc1 */
#include <linux/soc/ixp4xx/cpu.h>
#define MAX_KEYLEN 32
/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16
#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80
#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9
#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000
#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8
#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400
#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)
#define MAX_IVLEN 16
#define NPE_QLEN 16
/* Space for registering when the first
* NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE 16
struct buffer_desc {
u32 phys_next;
#ifdef __ARMEB__
u16 buf_len;
u16 pkt_len;
#else
u16 pkt_len;
u16 buf_len;
#endif
dma_addr_t phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
enum dma_data_direction dir;
};
struct crypt_ctl {
#ifdef __ARMEB__
u8 mode; /* NPE_OP_* operation mode */
u8 init_len;
u16 reserved;
#else
u16 reserved;
u8 init_len;
u8 mode; /* NPE_OP_* operation mode */
#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
u32 icv_rev_aes; /* icv or rev aes */
u32 src_buf;
u32 dst_buf;
#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
u16 crypt_offs; /* Cryption start offset */
u16 crypt_len; /* Cryption data length */
#else
u16 auth_len; /* Authentication data length */
u16 auth_offs; /* Authentication start offset */
u16 crypt_len; /* Cryption data length */
u16 crypt_offs; /* Cryption start offset */
#endif
u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
u32 crypto_ctx; /* NPE Crypto Param structure address */
/* Used by Host: 4*4 bytes*/
unsigned int ctl_flags;
union {
struct skcipher_request *ablk_req;
struct aead_request *aead_req;
struct crypto_tfm *tfm;
} data;
struct buffer_desc *regist_buf;
u8 *regist_ptr;
};
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
u8 iv[MAX_IVLEN];
bool encrypt;
struct skcipher_request fallback_req; // keep at the end
};
struct aead_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
int encrypt;
};
struct ix_hash_algo {
u32 cfgword;
unsigned char *icv;
};
struct ix_sa_dir {
unsigned char *npe_ctx;
dma_addr_t npe_ctx_phys;
int npe_ctx_idx;
u8 npe_mode;
};
struct ixp_ctx {
struct ix_sa_dir encrypt;
struct ix_sa_dir decrypt;
int authkey_len;
u8 authkey[MAX_KEYLEN];
int enckey_len;
u8 enckey[MAX_KEYLEN];
u8 salt[MAX_IVLEN];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
unsigned int salted;
atomic_t configuring;
struct completion completion;
struct crypto_skcipher *fallback_tfm;
};
struct ixp_alg {
struct skcipher_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
int registered;
};
struct ixp_aead_alg {
struct aead_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
int registered;
};
static const struct ix_hash_algo hash_alg_md5 = {
.cfgword = 0xAA010004,
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
.cfgword = 0x00000005,
.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
"\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
static struct npe *npe_c;
static unsigned int send_qid;
static unsigned int recv_qid;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;
static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;
static int support_aes = 1;
static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}
static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}
static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
IS_ENABLED(CONFIG_64BIT)) &&
sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
&crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
return 0;
}
static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
static int idx;
unsigned long flags;
spin_lock_irqsave(&desc_lock, flags);
if (unlikely(!crypt_virt))
setup_crypt_desc();
if (unlikely(!crypt_virt)) {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN)
idx = 0;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&desc_lock, flags);
return crypt_virt + i;
} else {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
}
static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
static int idx = NPE_QLEN;
struct crypt_ctl *desc;
unsigned long flags;
desc = get_crypt_desc();
if (desc)
return desc;
if (unlikely(!crypt_virt))
return NULL;
spin_lock_irqsave(&emerg_lock, flags);
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN_TOTAL)
idx = NPE_QLEN;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&emerg_lock, flags);
return crypt_virt + i;
} else {
spin_unlock_irqrestore(&emerg_lock, flags);
return NULL;
}
}
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
dma_addr_t phys)
{
while (buf) {
struct buffer_desc *buf1;
u32 phys1;
buf1 = buf->next;
phys1 = buf->phys_next;
dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
}
}
static struct tasklet_struct crypto_done_tasklet;
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int authsize = crypto_aead_authsize(tfm);
int decryptlen = req->assoclen + req->cryptlen - authsize;
if (req_ctx->encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
static void one_packet(dma_addr_t phys)
{
struct device *dev = &pdev->dev;
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
failed = phys & 0x1 ? -EBADMSG : 0;
phys &= ~0x3;
crypt = crypt_phys2virt(phys);
switch (crypt->ctl_flags & CTL_FLAG_MASK) {
case CTL_FLAG_PERFORM_AEAD: {
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
if (req_ctx->hmac_virt)
finish_scattered_hmac(crypt);
aead_request_complete(req, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
struct skcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
unsigned int offset;
if (ivsize > 0) {
offset = req->cryptlen - ivsize;
if (req_ctx->encrypt) {
scatterwalk_map_and_copy(req->iv, req->dst,
offset, ivsize, 0);
} else {
memcpy(req->iv, req_ctx->iv, ivsize);
memzero_explicit(req_ctx->iv, ivsize);
}
}
if (req_ctx->dst)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
skcipher_request_complete(req, failed);
break;
}
case CTL_FLAG_GEN_ICV:
ctx = crypto_tfm_ctx(crypt->data.tfm);
dma_pool_free(ctx_pool, crypt->regist_ptr,
crypt->regist_buf->phys_addr);
dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
case CTL_FLAG_GEN_REVAES:
ctx = crypto_tfm_ctx(crypt->data.tfm);
*(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
default:
BUG();
}
crypt->ctl_flags = CTL_FLAG_UNUSED;
}
static void irqhandler(void *_unused)
{
tasklet_schedule(&crypto_done_tasklet);
}
static void crypto_done_action(unsigned long arg)
{
int i;
for (i = 0; i < 4; i++) {
dma_addr_t phys = qmgr_get_entry(recv_qid);
if (!phys)
return;
one_packet(phys);
}
tasklet_schedule(&crypto_done_tasklet);
}
static int init_ixp_crypto(struct device *dev)
{
struct device_node *np = dev->of_node;
u32 msg[2] = { 0, 0 };
int ret = -ENODEV;
u32 npe_id;
dev_info(dev, "probing...\n");
/* Locate the NPE and queue manager to use from device tree */
if (IS_ENABLED(CONFIG_OF) && np) {
struct of_phandle_args queue_spec;
struct of_phandle_args npe_spec;
ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
1, 0, &npe_spec);
if (ret) {
dev_err(dev, "no NPE engine specified\n");
return -ENODEV;
}
npe_id = npe_spec.args[0];
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
if (ret) {
dev_err(dev, "no rx queue phandle\n");
return -ENODEV;
}
recv_qid = queue_spec.args[0];
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
if (ret) {
dev_err(dev, "no txready queue phandle\n");
return -ENODEV;
}
send_qid = queue_spec.args[0];
} else {
/*
* Hardcoded engine when using platform data, this goes away
* when we switch to using DT only.
*/
npe_id = 2;
send_qid = 29;
recv_qid = 30;
}
npe_c = npe_request(npe_id);
if (!npe_c)
return ret;
if (!npe_running(npe_c)) {
ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
if (ret)
goto npe_release;
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
} else {
if (npe_send_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
}
switch ((msg[1] >> 16) & 0xff) {
case 3:
dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
support_aes = 0;
break;
case 4:
case 5:
support_aes = 1;
break;
default:
dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
ret = -ENODEV;
goto npe_release;
}
	/* buffer_pool will sometimes also be used to store the hmac,
	 * so ensure it is large enough
*/
BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
32, 0);
ret = -ENOMEM;
if (!buffer_pool)
goto err;
ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
if (!ctx_pool)
goto err;
ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
"ixp_crypto:out", NULL);
if (ret)
goto err;
ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
"ixp_crypto:in", NULL);
if (ret) {
qmgr_release_queue(send_qid);
goto err;
}
qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
qmgr_enable_irq(recv_qid);
return 0;
npe_error:
dev_err(dev, "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release:
npe_release(npe_c);
return ret;
}
static void release_ixp_crypto(struct device *dev)
{
qmgr_disable_irq(recv_qid);
tasklet_kill(&crypto_done_tasklet);
qmgr_release_queue(send_qid);
qmgr_release_queue(recv_qid);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
if (crypt_virt)
dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
static void reset_sa_dir(struct ix_sa_dir *dir)
{
memset(dir->npe_ctx, 0, NPE_CTX_LEN);
dir->npe_ctx_idx = 0;
dir->npe_mode = 0;
}
static int init_sa_dir(struct ix_sa_dir *dir)
{
dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
if (!dir->npe_ctx)
return -ENOMEM;
reset_sa_dir(dir);
return 0;
}
static void free_sa_dir(struct ix_sa_dir *dir)
{
memset(dir->npe_ctx, 0, NPE_CTX_LEN);
dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}
static int init_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
atomic_set(&ctx->configuring, 0);
ret = init_sa_dir(&ctx->encrypt);
if (ret)
return ret;
ret = init_sa_dir(&ctx->decrypt);
if (ret)
free_sa_dir(&ctx->encrypt);
return ret;
}
static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
const char *name = crypto_tfm_alg_name(ctfm);
ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_tfm)) {
pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(ctx->fallback_tfm));
return PTR_ERR(ctx->fallback_tfm);
}
pr_info("Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&tfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
);
crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
return init_tfm(crypto_skcipher_tfm(tfm));
}
static int init_tfm_aead(struct crypto_aead *tfm)
{
crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
return init_tfm(crypto_aead_tfm(tfm));
}
static void exit_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
free_sa_dir(&ctx->encrypt);
free_sa_dir(&ctx->decrypt);
}
static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
crypto_free_skcipher(ctx->fallback_tfm);
exit_tfm(crypto_skcipher_tfm(tfm));
}
static void exit_tfm_aead(struct crypto_aead *tfm)
{
exit_tfm(crypto_aead_tfm(tfm));
}
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
int init_len, u32 ctx_addr, const u8 *key,
int key_len)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypt_ctl *crypt;
struct buffer_desc *buf;
int i;
u8 *pad;
dma_addr_t pad_phys, buf_phys;
BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
if (!pad)
return -ENOMEM;
buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
if (!buf) {
dma_pool_free(ctx_pool, pad, pad_phys);
return -ENOMEM;
}
crypt = get_crypt_desc_emerg();
if (!crypt) {
dma_pool_free(ctx_pool, pad, pad_phys);
dma_pool_free(buffer_pool, buf, buf_phys);
return -EAGAIN;
}
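	/*
	 * Build the HMAC pad block: the key is zero-padded to the hash
	 * block length and XORed with the pad byte (HMAC_IPAD_VALUE 0x36
	 * or HMAC_OPAD_VALUE 0x5c, per RFC 2104). The NPE then hashes
	 * this block to precompute the inner/outer chaining variables at
	 * the "target" address in the crypto context.
	 */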
memcpy(pad, key, key_len);
memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
pad[i] ^= xpad;
crypt->data.tfm = tfm;
crypt->regist_ptr = pad;
crypt->regist_buf = buf;
crypt->auth_offs = 0;
crypt->auth_len = HMAC_PAD_BLOCKLEN;
crypt->crypto_ctx = ctx_addr;
crypt->src_buf = buf_phys;
crypt->icv_rev_aes = target;
crypt->mode = NPE_OP_HASH_GEN_ICV;
crypt->init_len = init_len;
crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
buf->next = NULL;
buf->buf_len = HMAC_PAD_BLOCKLEN;
buf->pkt_len = 0;
buf->phys_addr = pad_phys;
atomic_inc(&ctx->configuring);
qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(send_qid));
return 0;
}
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
const u8 *key, int key_len, unsigned int digest_len)
{
u32 itarget, otarget, npe_ctx_addr;
unsigned char *cinfo;
int init_len, ret = 0;
u32 cfgword;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
const struct ix_hash_algo *algo;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx + dir->npe_ctx_idx;
algo = ix_hash(tfm);
/* write cfg word to cryptinfo */
cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
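	/*
	 * The shift encodes authsize as a count of 32-bit digest words in
	 * the field starting at bit 8: since authsize is a multiple of 4
	 * (enforced in aead_setauthsize()), (authsize / 4) << 8 equals
	 * authsize << 6. E.g. authsize 12 -> 12 << 6 = 0x300 = 3 << 8.
	 */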
#ifndef __ARMEB__
cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
*(__be32 *)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
/* write ICV to cryptinfo */
memcpy(cinfo, algo->icv, digest_len);
cinfo += digest_len;
itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+ sizeof(algo->cfgword);
otarget = itarget + digest_len;
init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
dir->npe_ctx_idx += init_len;
dir->npe_mode |= NPE_OP_HASH_ENABLE;
if (!encrypt)
dir->npe_mode |= NPE_OP_HASH_VERIFY;
ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
init_len, npe_ctx_addr, key, key_len);
if (ret)
return ret;
return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
init_len, npe_ctx_addr, key, key_len);
}
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
struct crypt_ctl *crypt;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct ix_sa_dir *dir = &ctx->decrypt;
crypt = get_crypt_desc_emerg();
if (!crypt)
return -EAGAIN;
*(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
crypt->data.tfm = tfm;
crypt->crypt_offs = 0;
crypt->crypt_len = AES_BLOCK128;
crypt->src_buf = 0;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
crypt->mode = NPE_OP_ENC_GEN_KEY;
crypt->init_len = dir->npe_ctx_idx;
crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
atomic_inc(&ctx->configuring);
qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(send_qid));
return 0;
}
static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
int key_len)
{
u8 *cinfo;
u32 cipher_cfg;
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
if (encrypt) {
cipher_cfg = cipher_cfg_enc(tfm);
dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
} else {
cipher_cfg = cipher_cfg_dec(tfm);
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
case 16:
keylen_cfg = MOD_AES128;
break;
case 24:
keylen_cfg = MOD_AES192;
break;
case 32:
keylen_cfg = MOD_AES256;
break;
default:
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else {
err = crypto_des_verify_key(tfm, key);
if (err)
return err;
}
/* write cfg word to cryptinfo */
*(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
cinfo += sizeof(cipher_cfg);
/* write cipher key to cryptinfo */
memcpy(cinfo, key, key_len);
/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
key_len = DES3_EDE_KEY_SIZE;
}
dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
if ((cipher_cfg & MOD_AES) && !encrypt)
return gen_rev_aes_key(tfm);
return 0;
}
static struct buffer_desc *chainup_buffers(struct device *dev,
struct scatterlist *sg, unsigned int nbytes,
struct buffer_desc *buf, gfp_t flags,
enum dma_data_direction dir)
{
for (; nbytes > 0; sg = sg_next(sg)) {
unsigned int len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
dma_addr_t next_buf_phys;
void *ptr;
nbytes -= len;
ptr = sg_virt(sg);
next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
if (!next_buf) {
buf = NULL;
break;
}
sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
buf->next = next_buf;
buf->phys_next = next_buf_phys;
buf = next_buf;
buf->phys_addr = sg_dma_address(sg);
buf->buf_len = len;
buf->dir = dir;
}
	if (buf) {
		buf->next = NULL;
		buf->phys_next = 0;
	}
	return buf;
}
static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
init_completion(&ctx->completion);
atomic_inc(&ctx->configuring);
reset_sa_dir(&ctx->encrypt);
reset_sa_dir(&ctx->decrypt);
ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
ret = setup_cipher(&tfm->base, 0, key, key_len);
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
if (ret)
return ret;
crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}
static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
return verify_skcipher_des3_key(tfm, key) ?:
ablk_setkey(tfm, key, key_len);
}
static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	/* the nonce is stored in bytes at the end of the key */
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ablk_setkey(tfm, key, key_len);
}
static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
struct ablk_ctx *rctx = skcipher_request_ctx(areq);
int err;
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
areq->base.complete, areq->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (encrypt)
err = crypto_skcipher_encrypt(&rctx->fallback_req);
else
err = crypto_skcipher_decrypt(&rctx->fallback_req);
return err;
}
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int nbytes = req->cryptlen;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct buffer_desc src_hook;
struct device *dev = &pdev->dev;
unsigned int offset;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
return ixp4xx_cipher_fallback(req, encrypt);
if (qmgr_stat_full(send_qid))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
req_ctx->encrypt = encrypt;
crypt = get_crypt_desc();
if (!crypt)
return -ENOMEM;
crypt->data.ablk_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->mode = dir->npe_mode;
crypt->init_len = dir->npe_ctx_idx;
crypt->crypt_offs = 0;
crypt->crypt_len = nbytes;
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
if (ivsize > 0 && !encrypt) {
offset = req->cryptlen - ivsize;
scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
}
if (req->src != req->dst) {
struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
goto free_buf_dest;
src_direction = DMA_TO_DEVICE;
req_ctx->dst = dst_hook.next;
crypt->dst_buf = dst_hook.phys_next;
} else {
req_ctx->dst = NULL;
}
req_ctx->src = NULL;
if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
src_direction))
goto free_buf_src;
req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(send_qid));
return -EINPROGRESS;
free_buf_src:
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
if (req->src != req->dst)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
static int ablk_encrypt(struct skcipher_request *req)
{
return ablk_perform(req, 1);
}
static int ablk_decrypt(struct skcipher_request *req)
{
return ablk_perform(req, 0);
}
static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 iv[CTR_RFC3686_BLOCK_SIZE];
u8 *info = req->iv;
int ret;
/* set up counter block */
memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
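	/*
	 * Resulting 16-byte counter block (RFC 3686 layout):
	 *   | nonce (4 bytes) | per-request IV (8 bytes) | counter = 1 (4 bytes, BE) |
	 */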
req->iv = iv;
ret = ablk_perform(req, 1);
req->iv = info;
return ret;
}
static int aead_perform(struct aead_request *req, int encrypt,
int cryptoffset, int eff_cryptlen, u8 *iv)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int ivsize = crypto_aead_ivsize(tfm);
unsigned int authsize = crypto_aead_authsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int cryptlen;
struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
unsigned int lastlen;
if (qmgr_stat_full(send_qid))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
if (encrypt) {
dir = &ctx->encrypt;
cryptlen = req->cryptlen;
} else {
dir = &ctx->decrypt;
/* req->cryptlen includes the authsize when decrypting */
cryptlen = req->cryptlen - authsize;
eff_cryptlen -= authsize;
}
crypt = get_crypt_desc();
if (!crypt)
return -ENOMEM;
crypt->data.aead_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->mode = dir->npe_mode;
crypt->init_len = dir->npe_ctx_idx;
crypt->crypt_offs = cryptoffset;
crypt->crypt_len = eff_cryptlen;
crypt->auth_offs = 0;
crypt->auth_len = req->assoclen + cryptlen;
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
buf = chainup_buffers(dev, req->src, crypt->auth_len,
&src_hook, flags, src_direction);
req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
if (!buf)
goto free_buf_src;
lastlen = buf->buf_len;
if (lastlen >= authsize)
crypt->icv_rev_aes = buf->phys_addr +
buf->buf_len - authsize;
req_ctx->dst = NULL;
if (req->src != req->dst) {
struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
src_direction = DMA_TO_DEVICE;
buf = chainup_buffers(dev, req->dst, crypt->auth_len,
&dst_hook, flags, DMA_FROM_DEVICE);
req_ctx->dst = dst_hook.next;
crypt->dst_buf = dst_hook.phys_next;
if (!buf)
goto free_buf_dst;
if (encrypt) {
lastlen = buf->buf_len;
if (lastlen >= authsize)
crypt->icv_rev_aes = buf->phys_addr +
buf->buf_len - authsize;
}
}
if (unlikely(lastlen < authsize)) {
dma_addr_t dma;
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
crypt->icv_rev_aes = dma;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
}
req_ctx->encrypt = encrypt;
} else {
req_ctx->hmac_virt = NULL;
}
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(send_qid));
return -EINPROGRESS;
free_buf_dst:
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int digest_len = crypto_aead_maxauthsize(tfm);
int ret;
if (!ctx->enckey_len && !ctx->authkey_len)
return 0;
init_completion(&ctx->completion);
atomic_inc(&ctx->configuring);
reset_sa_dir(&ctx->encrypt);
reset_sa_dir(&ctx->decrypt);
ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
return ret;
}
static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
int max = crypto_aead_maxauthsize(tfm) >> 2;
if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
return -EINVAL;
return aead_setup(tfm, authsize);
}
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
if (keys.authkeylen > sizeof(ctx->authkey))
goto badkey;
if (keys.enckeylen > sizeof(ctx->enckey))
goto badkey;
memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
ctx->authkey_len = keys.authkeylen;
ctx->enckey_len = keys.enckeylen;
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
int err;
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
goto badkey;
err = -EINVAL;
if (keys.authkeylen > sizeof(ctx->authkey))
goto badkey;
err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
if (err)
goto badkey;
memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
ctx->authkey_len = keys.authkeylen;
ctx->enckey_len = keys.enckeylen;
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
memzero_explicit(&keys, sizeof(keys));
return err;
}
static int aead_encrypt(struct aead_request *req)
{
return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}
static int aead_decrypt(struct aead_request *req)
{
return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}
static struct ixp_alg ixp4xx_algos[] = {
{
.crypto = {
.base.cra_name = "cbc(des)",
.base.cra_blocksize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.base.cra_name = "ecb(des)",
.base.cra_blocksize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
.base.cra_name = "cbc(aes)",
.base.cra_blocksize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
.base.cra_name = "ecb(aes)",
.base.cra_blocksize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
.crypto = {
.base.cra_name = "ctr(aes)",
.base.cra_blocksize = 1,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
.base.cra_name = "rfc3686(ctr(aes))",
.base.cra_blocksize = 1,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_rfc3686_setkey,
.encrypt = ablk_rfc3686_crypt,
.decrypt = ablk_rfc3686_crypt,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };
static struct ixp_aead_alg ixp4xx_aeads[] = {
{
.crypto = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_blocksize = DES_BLOCK_SIZE,
},
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
.setkey = des3_aead_setkey,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_blocksize = DES_BLOCK_SIZE,
},
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.setkey = des3_aead_setkey,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
#define IXP_POSTFIX "-ixp4xx"
static int ixp_crypto_probe(struct platform_device *_pdev)
{
struct device *dev = &_pdev->dev;
int num = ARRAY_SIZE(ixp4xx_algos);
int i, err;
pdev = _pdev;
err = init_ixp_crypto(dev);
if (err)
return err;
for (i = 0; i < num; i++) {
struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s"IXP_POSTFIX, cra->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
continue;
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
continue;
/* block ciphers */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK;
if (!cra->setkey)
cra->setkey = ablk_setkey;
if (!cra->encrypt)
cra->encrypt = ablk_encrypt;
if (!cra->decrypt)
cra->decrypt = ablk_decrypt;
cra->init = init_tfm_ablk;
cra->exit = exit_tfm_ablk;
cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
cra->base.cra_module = THIS_MODULE;
cra->base.cra_alignmask = 3;
cra->base.cra_priority = 300;
if (crypto_register_skcipher(cra))
dev_err(&pdev->dev, "Failed to register '%s'\n",
cra->base.cra_name);
else
ixp4xx_algos[i].registered = 1;
}
for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s"IXP_POSTFIX, cra->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
continue;
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY;
cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
cra->decrypt = aead_decrypt;
cra->init = init_tfm_aead;
cra->exit = exit_tfm_aead;
cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
cra->base.cra_module = THIS_MODULE;
cra->base.cra_alignmask = 3;
cra->base.cra_priority = 300;
if (crypto_register_aead(cra))
dev_err(&pdev->dev, "Failed to register '%s'\n",
cra->base.cra_driver_name);
else
ixp4xx_aeads[i].registered = 1;
}
return 0;
}
static int ixp_crypto_remove(struct platform_device *pdev)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
if (ixp4xx_aeads[i].registered)
crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
}
for (i = 0; i < num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
return 0;
}
static const struct of_device_id ixp4xx_crypto_of_match[] = {
{
.compatible = "intel,ixp4xx-crypto",
},
{},
};
static struct platform_driver ixp_crypto_driver = {
.probe = ixp_crypto_probe,
.remove = ixp_crypto_remove,
.driver = {
.name = "ixp4xx_crypto",
.of_match_table = ixp4xx_crypto_of_match,
},
};
module_platform_driver(ixp_crypto_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");
| linux-master | drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include "safexcel.h"
static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
int i;
/*
* Map all interfaces/rings to register index 0
* so they can share contexts. Without this, the EIP197 will
* assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses,
	 * which would cause records with the SAME memory address to
* use DIFFERENT cache buffers, causing both poor cache utilization
* AND serious coherence/invalidation issues.
*/
for (i = 0; i < 4; i++)
writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
/*
* Initialize other virtualization regs for cache
* These may not be in their reset state ...
*/
for (i = 0; i < priv->config.rings; i++) {
writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
writel(EIP197_FLUE_CONFIG_MAGIC,
priv->base + EIP197_FLUE_CONFIG(i));
}
writel(0, priv->base + EIP197_FLUE_OFFSETS);
writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}
static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
u32 addrmid, int *actbank)
{
u32 val;
int curbank;
curbank = addrmid >> 16;
if (curbank != *actbank) {
val = readl(priv->base + EIP197_CS_RAM_CTRL);
val = (val & ~EIP197_CS_BANKSEL_MASK) |
(curbank << EIP197_CS_BANKSEL_OFS);
writel(val, priv->base + EIP197_CS_RAM_CTRL);
*actbank = curbank;
}
}
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
int maxbanks, u32 probemask, u32 stride)
{
u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
int actbank;
/*
* And probe the actual size of the physically attached cache data RAM
	 * Using a binary subdivision algorithm down to 32-byte cache lines.
*/
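	/*
	 * Sketch of the search: a marker is written at the midpoint and
	 * inverted markers at every power-of-two alias below it; if the
	 * midpoint reads back intact, the RAM extends at least that far
	 * (continue in the upper half), otherwise the write aliased or
	 * failed (continue in the lower half). E.g. with maxbanks = 2 the
	 * search starts on [0, 0x40000) and converges to the byte size in
	 * log2(range / stride) iterations.
	 */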
addrhi = 1 << (16 + maxbanks);
addrlo = 0;
actbank = min(maxbanks - 1, 0);
while ((addrhi - addrlo) > stride) {
/* write marker to lowest address in top half */
addrmid = (addrhi + addrlo) >> 1;
marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
writel(marker,
priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
/* write invalid markers to possible aliases */
delta = 1 << __fls(addrmid);
while (delta >= stride) {
addralias = addrmid - delta;
eip197_trc_cache_banksel(priv, addralias, &actbank);
writel(~marker,
priv->base + EIP197_CLASSIFICATION_RAMS +
(addralias & 0xffff));
delta >>= 1;
}
/* read back marker from top half */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
if ((val & probemask) == marker)
/* read back correct, continue with top half */
addrlo = addrmid;
else
/* not read back correct, continue with bottom half */
addrhi = addrmid;
}
return addrhi;
}
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
int cs_rc_max, int cs_ht_wc)
{
int i;
u32 htable_offset, val, offset;
/* Clear all records in administration RAM */
for (i = 0; i < cs_rc_max; i++) {
offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
EIP197_CS_RC_PREV(EIP197_RC_NULL),
priv->base + offset);
val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
if (i == 0)
val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
else if (i == cs_rc_max - 1)
val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
writel(val, priv->base + offset + 4);
/* must also initialize the address key due to ECC! */
writel(0, priv->base + offset + 8);
writel(0, priv->base + offset + 12);
}
/* Clear the hash table entries */
htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
for (i = 0; i < cs_ht_wc; i++)
writel(GENMASK(29, 0),
priv->base + EIP197_CLASSIFICATION_RAMS +
htable_offset + i * sizeof(u32));
}
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, dsize, asize;
int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
int cs_rc_abs_max, cs_ht_sz;
int maxbanks;
/* Setup (dummy) virtualization for cache */
eip197_trc_cache_setupvirt(priv);
/*
* Enable the record cache memory access and
* probe the bank select width
*/
val = readl(priv->base + EIP197_CS_RAM_CTRL);
val &= ~EIP197_TRC_ENABLE_MASK;
val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
writel(val, priv->base + EIP197_CS_RAM_CTRL);
val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;
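	/*
	 * Writing all-ones to BANKSEL above and reading it back reveals how
	 * many bank select bits actually stick, i.e. how many 64KB data RAM
	 * banks can at most be present.
	 */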
/* Clear all ECC errors */
writel(0, priv->base + EIP197_TRC_ECCCTRL);
/*
* Make sure the cache memory is accessible by taking record cache into
* reset. Need data memory access here, not admin access.
*/
val = readl(priv->base + EIP197_TRC_PARAMS);
val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed data RAM size in bytes */
dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
	/*
	 * Now probe the administration RAM size in pretty much the same way,
	 * except that only the lower 30 bits are writable and we don't need
	 * bank selects.
	 */
val = readl(priv->base + EIP197_TRC_PARAMS);
/* admin access now */
val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed admin RAM size in admin words */
asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
/* Clear any ECC errors detected while probing! */
writel(0, priv->base + EIP197_TRC_ECCCTRL);
/* Sanity check probing results */
if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
dev_err(priv->dev, "Record cache probing failed (%d,%d).",
dsize, asize);
return -ENODEV;
}
	/*
	 * Determine the optimal configuration from the RAM sizes.
	 * Note that we assume the physical RAM configuration is sane;
	 * therefore, we don't do any parameter error checking here ...
	 */
/* For now, just use a single record format covering everything */
cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;
/*
* Step #1: How many records will physically fit?
* Hard upper limit is 1023!
*/
cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
/* Step #2: Need at least 2 words in the admin RAM per record */
cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
/* Step #3: Determine log2 of hash table size */
cs_ht_sz = __fls(asize - cs_rc_max) - 2;
/* Step #4: determine current size of hash table in dwords */
cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
/* Step #5: add back excess words and see if we can fit more records */
cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
/* Clear the cache RAMs */
eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
/* Disable the record cache memory access */
val = readl(priv->base + EIP197_CS_RAM_CTRL);
val &= ~EIP197_TRC_ENABLE_MASK;
writel(val, priv->base + EIP197_CS_RAM_CTRL);
/* Write head and tail pointers of the record free chain */
val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
writel(val, priv->base + EIP197_TRC_FREECHAIN);
/* Configure the record cache #1 */
val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
writel(val, priv->base + EIP197_TRC_PARAMS2);
/* Configure the record cache #2 */
val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
writel(val, priv->base + EIP197_TRC_PARAMS);
dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
return 0;
}
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
int pe, i;
u32 val;
for (pe = 0; pe < priv->config.pes; pe++) {
/* Configure the token FIFO's */
writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
/* Clear the ICE scratchpad memory */
val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
/* clear the scratchpad RAM using 32 bit writes only */
for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
writel(0, EIP197_PE(priv) +
EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));
/* Reset the IFPP engine to make its program mem accessible */
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
/* Reset the IPUE engine to make its program mem accessible */
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
/* Enable access to all IFPP program memories */
writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* bypass the OCE, if present */
if (priv->flags & EIP197_OCE)
writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
EIP197_PE_DEBUG(pe));
}
}
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
const struct firmware *fw)
{
u32 val;
int i;
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++) {
if (priv->data->fw_little_endian)
val = le32_to_cpu(((const __le32 *)fw->data)[i]);
else
val = be32_to_cpu(((const __be32 *)fw->data)[i]);
writel(val,
priv->base + EIP197_CLASSIFICATION_RAMS +
i * sizeof(val));
}
/* Exclude final 2 NOPs from size */
return i - EIP197_FW_TERMINAL_NOPS;
}
/*
* If FW is actual production firmware, then poll for its initialization
* to complete and check if it is good for the HW, otherwise just return OK.
*/
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
int pe, pollcnt;
u32 base, pollofs;
if (fpp)
pollofs = EIP197_FW_FPP_READY;
else
pollofs = EIP197_FW_PUE_READY;
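	/*
	 * Production firmware signals successful start-up by writing the
	 * value 1 to a fixed offset in its PE's ICE scratchpad RAM, which is
	 * busy-polled below (bounded by EIP197_FW_START_POLLCNT iterations).
	 */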
for (pe = 0; pe < priv->config.pes; pe++) {
base = EIP197_PE_ICE_SCRATCH_RAM(pe);
pollcnt = EIP197_FW_START_POLLCNT;
while (pollcnt &&
(readl_relaxed(EIP197_PE(priv) + base +
pollofs) != 1)) {
pollcnt--;
}
if (!pollcnt) {
dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
fpp, pe);
return false;
}
}
return true;
}
static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
int ipuesz, int ifppsz, int minifw)
{
int pe;
u32 val;
for (pe = 0; pe < priv->config.pes; pe++) {
/* Disable access to all program memory */
writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Start IFPP microengines */
if (minifw)
val = 0;
else
val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
EIP197_PE_ICE_UENG_DEBUG_RESET;
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
/* Start IPUE microengines */
if (minifw)
val = 0;
else
val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
EIP197_PE_ICE_UENG_DEBUG_RESET;
writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
}
/* For miniFW startup, there is no initialization, so always succeed */
if (minifw)
return true;
/* Wait until all the firmwares have properly started up */
if (!poll_fw_ready(priv, 1))
return false;
if (!poll_fw_ready(priv, 0))
return false;
return true;
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
const struct firmware *fw[FW_NB];
char fw_path[37], *dir = NULL;
int i, j, ret = 0, pe;
int ipuesz, ifppsz, minifw = 0;
if (priv->data->version == EIP197D_MRVL)
dir = "eip197d";
else if (priv->data->version == EIP197B_MRVL ||
priv->data->version == EIP197_DEVBRD)
dir = "eip197b";
else if (priv->data->version == EIP197C_MXL)
dir = "eip197c";
else
return -ENODEV;
retry_fw:
for (i = 0; i < FW_NB; i++) {
snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
if (ret) {
if (minifw || priv->data->version != EIP197B_MRVL)
goto release_fw;
/* Fallback to the old firmware location for the
* EIP197b.
*/
ret = firmware_request_nowarn(&fw[i], fw_name[i],
priv->dev);
if (ret)
goto release_fw;
}
}
eip197_init_firmware(priv);
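	/*
	 * eip197_init_firmware() left the IFPP program RAM window enabled
	 * (FPP_PROG_EN), so the first image written below lands in IFPP
	 * program memory; the window is then switched to IPUE for the second.
	 */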
ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
/* Enable access to IPUE program memories */
for (pe = 0; pe < priv->config.pes; pe++)
writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
dev_dbg(priv->dev, "Firmware loaded successfully\n");
return 0;
}
ret = -ENODEV;
release_fw:
for (j = 0; j < i; j++)
release_firmware(fw[j]);
if (!minifw) {
/* Retry with minifw path */
dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
dir = "eip197_minifw";
minifw = 1;
goto retry_fw;
}
dev_err(priv->dev, "Firmware load failed.\n");
return ret;
}
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
u32 cd_size_rnd, val;
int i, cd_fetch_cnt;
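	/*
	 * Round the command descriptor size (in 32-bit words) up to a whole
	 * number of bus-width beats; hwdataw is the log2 of the bus width in
	 * 32-bit words.
	 */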
cd_size_rnd = (priv->config.cd_size +
(BIT(priv->hwconfig.hwdataw) - 1)) >>
priv->hwconfig.hwdataw;
/* determine number of CD's we can fetch into the CD FIFO as 1 block */
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
(priv->config.pes * EIP197_FETCH_DEPTH));
} else {
/* for the EIP97, just fetch all that fits minus 1 */
cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
cd_size_rnd) - 1;
}
	/*
	 * Since we're using command descriptors way larger than formally
	 * specified, we need to check whether we can fit even 1 for low-end
	 * EIP196's!
	 */
if (!cd_fetch_cnt) {
dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
return -ENODEV;
}
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
(priv->config.cd_offset << 14) | priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
(cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
}
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
u32 rd_size_rnd, val;
int i, rd_fetch_cnt;
/* determine number of RD's we can fetch into the FIFO as one block */
rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
(BIT(priv->hwconfig.hwdataw) - 1)) >>
priv->hwconfig.hwdataw;
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
(priv->config.pes * EIP197_FETCH_DEPTH));
} else {
/* for the EIP97, just fetch all that fits minus 1 */
rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
rd_size_rnd) - 1;
}
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
priv->config.rd_size,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((rd_fetch_cnt *
(rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
(rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
writel(val,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 val;
int i, ret, pe, opbuflo, opbufhi;
dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
priv->config.pes, priv->config.rings);
/*
* For EIP197's only set maximum number of TX commands to 2^5 = 32
* Skip for the EIP97 as it does not have this field.
*/
if (priv->flags & SAFEXCEL_HW_EIP197) {
val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
}
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Processing Engine configuration */
for (pe = 0; pe < priv->config.pes; pe++) {
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
if (priv->flags & EIP197_PE_ARB)
/* Reset HIA input interface arbiter (if present) */
writel(EIP197_HIA_RA_PE_CTRL_RESET,
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
/* Leave the DFE threads reset state */
writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
EIP197_PE_IN_xBUF_THRES_MAX(9),
EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
EIP197_PE_IN_xBUF_THRES_MAX(7),
EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
if (priv->flags & SAFEXCEL_HW_EIP197)
/* enable HIA input interface arbiter and rings */
writel(EIP197_HIA_RA_PE_CTRL_EN |
GENMASK(priv->config.rings - 1, 0),
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
/* Wait for all DSE threads to complete */
while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
/* DMA transfer size to use */
if (priv->hwconfig.hwnumpes > 4) {
opbuflo = 9;
opbufhi = 10;
} else {
opbuflo = 7;
opbufhi = 8;
}
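		/*
		 * Bigger configurations (more than 4 PEs) presumably have
		 * deeper output data buffers, hence the larger thresholds.
		 */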
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
/* FIXME: instability issues can occur for EIP97 but disabling
* it impacts performance.
*/
if (priv->flags & SAFEXCEL_HW_EIP197)
val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
/* Leave the DSE threads reset state */
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
/* Processing Engine configuration */
/* Token & context configuration */
val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
/* H/W capabilities selection: just enable everything */
writel(EIP197_FUNCTION_ALL,
EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
writel(EIP197_FUNCTION_ALL,
EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
}
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
for (pe = 0; pe < priv->config.pes; pe++) {
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
}
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
if (priv->flags & EIP197_SIMPLE_TRC) {
writel(EIP197_STRC_CONFIG_INIT |
EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
priv->base + EIP197_STRC_CONFIG);
writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
} else if (priv->flags & SAFEXCEL_HW_EIP197) {
ret = eip197_trc_cache_init(priv);
if (ret)
return ret;
}
if (priv->flags & EIP197_ICE) {
ret = eip197_load_firmwares(priv);
if (ret)
return ret;
}
return safexcel_hw_setup_cdesc_rings(priv) ?:
safexcel_hw_setup_rdesc_rings(priv) ?:
0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
int ring)
{
int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
if (!coal)
return;
/* Configure when we want an interrupt */
writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
req = priv->ring[ring].req;
backlog = priv->ring[ring].backlog;
if (req)
goto handle_req;
while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!req) {
priv->ring[ring].req = NULL;
priv->ring[ring].backlog = NULL;
goto finalize;
}
handle_req:
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, &commands, &results);
if (ret)
goto request_failed;
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
/* In case the send() helper did not issue any command to push
* to the engine because the input data was cached, continue to
* dequeue other requests as this is valid and not an error.
*/
if (!commands && !results)
continue;
cdesc += commands;
rdesc += results;
nreq++;
}
request_failed:
/* Not enough resources to handle all the requests. Bail out and save
* the request and the backlog for the next dequeue call (per-ring).
*/
priv->ring[ring].req = req;
priv->ring[ring].backlog = backlog;
finalize:
if (!nreq)
return;
spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests += nreq;
if (!priv->ring[ring].busy) {
safexcel_try_push_requests(priv, ring);
priv->ring[ring].busy = true;
}
spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset),
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void *rdp)
{
struct safexcel_result_desc *rdesc = rdp;
struct result_data_desc *result_data = rdp + priv->config.res_offset;
if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
((!rdesc->descriptor_overflow) &&
(!rdesc->buffer_overflow) &&
(!result_data->error_code))))
return 0;
if (rdesc->descriptor_overflow)
dev_err(priv->dev, "Descriptor overflow detected");
if (rdesc->buffer_overflow)
dev_err(priv->dev, "Buffer overflow detected");
if (result_data->error_code & 0x4066) {
/* Fatal error (bits 1,2,5,6 & 14) */
dev_err(priv->dev,
"result descriptor error (%x)",
result_data->error_code);
return -EIO;
} else if (result_data->error_code &
(BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * blocksize, length & overflow errors indicate
		 * something wrong with the input!
		 */
return -EINVAL;
} else if (result_data->error_code & BIT(9)) {
/* Authentication failed */
return -EBADMSG;
}
/* All other non-fatal errors */
return -EINVAL;
}
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
int ring,
struct safexcel_result_desc *rdesc,
struct crypto_async_request *req)
{
int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
priv->ring[ring].rdr_req[i] = req;
}
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
int i = safexcel_ring_first_rdr_index(priv, ring);
return priv->ring[ring].rdr_req[i];
}
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
/* Acknowledge the command descriptors */
do {
cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
if (IS_ERR(cdesc)) {
dev_err(priv->dev,
"Could not retrieve the command descriptor\n");
return;
}
} while (!cdesc->last_seg);
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring)
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
struct safexcel_token *dmmy;
int ret = 0;
/* Prepare command descriptor */
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
&dmmy);
if (IS_ERR(cdesc))
return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
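	/*
	 * INV_TR turns this descriptor into a transform record (context)
	 * cache invalidation command for ctxr_dma rather than a data
	 * processing operation.
	 */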
/* Prepare result descriptor */
rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
goto cdesc_rollback;
}
safexcel_rdr_req_set(priv, ring, rdesc, async);
return ret;
cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
int ring)
{
struct crypto_async_request *req;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
handle_results:
tot_descs = 0;
nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
goto requests_left;
for (i = 0; i < nreq; i++) {
req = safexcel_rdr_req_get(priv, ring);
ctx = crypto_tfm_ctx(req->tfm);
ndesc = ctx->handle_result(priv, ring, req,
&should_complete, &ret);
if (ndesc < 0) {
dev_err(priv->dev, "failed to handle result (%d)\n",
ndesc);
goto acknowledge;
}
if (should_complete) {
local_bh_disable();
crypto_request_complete(req, ret);
local_bh_enable();
}
tot_descs += ndesc;
handled++;
}
acknowledge:
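	/*
	 * Report back how many packets (i) and how many descriptor bytes were
	 * consumed so the RDR can release those ring slots.
	 */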
if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
(tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
goto handle_results;
requests_left:
spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
struct safexcel_crypto_priv *priv;
int ring;
};
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
* Fatal error, the RDR is unusable and must be
* reinitialized. This should not happen under
* normal circumstances.
*/
dev_err(priv->dev, "RDR: fatal error.\n");
} else if (likely(stat & EIP197_xDR_THRESH)) {
rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
return rc;
}
static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
int ring = irq_data->ring;
safexcel_handle_result_descriptor(priv, ring);
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(void *pdev, int irqid,
int is_pci_dev,
int ring_id,
irq_handler_t handler,
irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq, cpu;
struct device *dev;
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
struct pci_dev *pci_pdev = pdev;
dev = &pci_pdev->dev;
irq = pci_irq_vector(pci_pdev, irqid);
if (irq < 0) {
dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
irqid, irq);
return irq;
}
} else if (IS_ENABLED(CONFIG_OF)) {
struct platform_device *plf_pdev = pdev;
char irq_name[6] = {0}; /* "ringX\0" */
snprintf(irq_name, 6, "ring%d", irqid);
dev = &plf_pdev->dev;
irq = platform_get_irq_byname(plf_pdev, irq_name);
if (irq < 0)
return irq;
} else {
return -ENXIO;
}
ret = devm_request_threaded_irq(dev, irq, handler,
threaded_handler, IRQF_ONESHOT,
dev_name(dev), ring_irq_priv);
if (ret) {
dev_err(dev, "unable to request IRQ %d\n", irq);
return ret;
}
/* Set affinity */
cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_ecb_des,
&safexcel_alg_cbc_des,
&safexcel_alg_ecb_des3_ede,
&safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
&safexcel_alg_cfb_aes,
&safexcel_alg_ofb_aes,
&safexcel_alg_ctr_aes,
&safexcel_alg_md5,
&safexcel_alg_sha1,
&safexcel_alg_sha224,
&safexcel_alg_sha256,
&safexcel_alg_sha384,
&safexcel_alg_sha512,
&safexcel_alg_hmac_md5,
&safexcel_alg_hmac_sha1,
&safexcel_alg_hmac_sha224,
&safexcel_alg_hmac_sha256,
&safexcel_alg_hmac_sha384,
&safexcel_alg_hmac_sha512,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
&safexcel_alg_authenc_hmac_sha384_cbc_aes,
&safexcel_alg_authenc_hmac_sha512_cbc_aes,
&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha1_ctr_aes,
&safexcel_alg_authenc_hmac_sha224_ctr_aes,
&safexcel_alg_authenc_hmac_sha256_ctr_aes,
&safexcel_alg_authenc_hmac_sha384_ctr_aes,
&safexcel_alg_authenc_hmac_sha512_ctr_aes,
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
&safexcel_alg_crc32,
&safexcel_alg_cbcmac,
&safexcel_alg_xcbcmac,
&safexcel_alg_cmac,
&safexcel_alg_chacha20,
&safexcel_alg_chachapoly,
&safexcel_alg_chachapoly_esp,
&safexcel_alg_sm3,
&safexcel_alg_hmac_sm3,
&safexcel_alg_ecb_sm4,
&safexcel_alg_cbc_sm4,
&safexcel_alg_ofb_sm4,
&safexcel_alg_cfb_sm4,
&safexcel_alg_ctr_sm4,
&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
&safexcel_alg_sha3_224,
&safexcel_alg_sha3_256,
&safexcel_alg_sha3_384,
&safexcel_alg_sha3_512,
&safexcel_alg_hmac_sha3_224,
&safexcel_alg_hmac_sha3_256,
&safexcel_alg_hmac_sha3_384,
&safexcel_alg_hmac_sha3_512,
&safexcel_alg_authenc_hmac_sha1_cbc_des,
&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha256_cbc_des,
&safexcel_alg_authenc_hmac_sha224_cbc_des,
&safexcel_alg_authenc_hmac_sha512_cbc_des,
&safexcel_alg_authenc_hmac_sha384_cbc_des,
&safexcel_alg_rfc4106_gcm,
&safexcel_alg_rfc4543_gcm,
&safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
int i, j, ret = 0;
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
safexcel_algs[i]->priv = priv;
/* Do we have all required base algorithms available? */
if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
safexcel_algs[i]->algo_mask)
/* No, so don't register this ciphersuite */
continue;
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
else
ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
if (ret)
goto fail;
}
return 0;
fail:
for (j = 0; j < i; j++) {
/* Do we have all required base algorithms available? */
if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
safexcel_algs[j]->algo_mask)
/* No, so don't unregister this ciphersuite */
continue;
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
else
crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
}
return ret;
}
static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
int i;
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
/* Do we have all required base algorithms available? */
if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
safexcel_algs[i]->algo_mask)
/* No, so don't unregister this ciphersuite */
continue;
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
else
crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
}
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
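	/* mask for rounding sizes up to the HW data bus width (2^hwdataw 32-bit words) */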
priv->config.pes = priv->hwconfig.hwnumpes;
priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
/* Cannot currently support more rings than we have ring AICs! */
priv->config.rings = min_t(u32, priv->config.rings,
priv->hwconfig.hwnumraic);
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
	/* result token sits behind the descriptor, but its offset must be rounded to the bus width */
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
	/* now the size of the descriptor is this 1st part plus the result struct */
priv->config.rd_size = priv->config.res_offset +
EIP197_RD64_RESULT_SIZE;
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
/* convert dwords to bytes */
priv->config.cd_offset *= sizeof(u32);
priv->config.cdsh_offset *= sizeof(u32);
priv->config.rd_offset *= sizeof(u32);
priv->config.res_offset *= sizeof(u32);
}
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
struct safexcel_register_offsets *offsets = &priv->offsets;
if (priv->flags & SAFEXCEL_HW_EIP197) {
offsets->hia_aic = EIP197_HIA_AIC_BASE;
offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
offsets->hia_dfe = EIP197_HIA_DFE_BASE;
offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
offsets->hia_dse = EIP197_HIA_DSE_BASE;
offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
offsets->pe = EIP197_PE_BASE;
offsets->global = EIP197_GLOBAL_BASE;
} else {
offsets->hia_aic = EIP97_HIA_AIC_BASE;
offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
offsets->hia_dfe = EIP97_HIA_DFE_BASE;
offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
offsets->hia_dse = EIP97_HIA_DSE_BASE;
offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
offsets->pe = EIP97_PE_BASE;
offsets->global = EIP97_GLOBAL_BASE;
}
}
/*
* Generic part of probe routine, shared by platform and PCI driver
*
* Assumes IO resources have been mapped, private data mem has been allocated,
* clocks have been enabled, device pointer has been assigned etc.
*
*/
static int safexcel_probe_generic(void *pdev,
struct safexcel_crypto_priv *priv,
int is_pci_dev)
{
struct device *dev = priv->dev;
u32 peid, version, mask, val, hiaopt, hwopt, peopt;
int i, ret, hwctg;
priv->context_pool = dmam_pool_create("safexcel-context", dev,
sizeof(struct safexcel_context_record),
1, 0);
if (!priv->context_pool)
return -ENOMEM;
	/*
	 * First try the EIP97 HIA version regs.
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values.
	 */
version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
mask = 0; /* do not swap */
if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
/* read back byte-swapped, so complement byte swap bits */
mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
} else {
/* So it wasn't an EIP97 ... maybe it's an EIP197? */
version = readl(priv->base + EIP197_HIA_AIC_BASE +
EIP197_HIA_VERSION);
if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
priv->flags |= SAFEXCEL_HW_EIP197;
} else if (EIP197_REG_HI16(version) ==
EIP197_HIA_VERSION_BE) {
/* read back byte-swapped, so complement swap bits */
mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
priv->flags |= SAFEXCEL_HW_EIP197;
} else {
return -ENODEV;
}
}
/* Now initialize the reg offsets based on the probing info so far */
safexcel_init_register_offsets(priv);
	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
if (mask) {
val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
val = val ^ (mask >> 24); /* toggle byte swap bits */
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
}
	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets set up, *really* probe for the EIP97/EIP197.
	 */
version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
if (((priv->flags & SAFEXCEL_HW_EIP197) &&
(EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
(EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
((!(priv->flags & SAFEXCEL_HW_EIP197) &&
(EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report an appropriate error.
		 */
dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
version);
return -ENODEV;
}
priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
hwctg = version >> 28;
peid = version & 255;
/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
return -ENODEV;
}
priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
/* Detect EIP96 packet engine and version */
version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
return -ENODEV;
}
priv->hwconfig.pever = EIP197_VERSION_MASK(version);
hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
priv->hwconfig.icever = 0;
priv->hwconfig.ocever = 0;
priv->hwconfig.psever = 0;
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197 */
peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
EIP197_HWDATAW_MASK;
priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
EIP197_CFSIZE_MASK) +
EIP197_CFSIZE_ADJUST;
priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
EIP197_RFSIZE_MASK) +
EIP197_RFSIZE_ADJUST;
priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
EIP197_N_PES_MASK;
priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
EIP197_N_RINGS_MASK;
if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
priv->flags |= EIP197_PE_ARB;
if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
priv->flags |= EIP197_ICE;
/* Detect ICE EIP207 class. engine and version */
version = readl(EIP197_PE(priv) +
EIP197_PE_ICE_VERSION(0));
if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
peid);
return -ENODEV;
}
priv->hwconfig.icever = EIP197_VERSION_MASK(version);
}
if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
priv->flags |= EIP197_OCE;
/* Detect EIP96PP packet stream editor and version */
version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
return -ENODEV;
}
priv->hwconfig.psever = EIP197_VERSION_MASK(version);
/* Detect OCE EIP207 class. engine and version */
version = readl(EIP197_PE(priv) +
EIP197_PE_ICE_VERSION(0));
if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
peid);
return -ENODEV;
}
priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
}
/* If not a full TRC, then assume simple TRC */
if (!(hwopt & EIP197_OPT_HAS_TRC))
priv->flags |= EIP197_SIMPLE_TRC;
/* EIP197 always has SOME form of TRC */
priv->flags |= EIP197_TRC_CACHE;
} else {
/* EIP97 */
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
EIP97_HWDATAW_MASK;
priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
EIP97_CFSIZE_MASK;
priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
EIP97_RFSIZE_MASK;
priv->hwconfig.hwnumpes = 1; /* by definition */
priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
EIP197_N_RINGS_MASK;
}
/* Scan for ring AIC's */
for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
version = readl(EIP197_HIA_AIC_R(priv) +
EIP197_HIA_AIC_R_VERSION(i));
if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
break;
}
priv->hwconfig.hwnumraic = i;
/* Low-end EIP196 may not have any ring AIC's ... */
if (!priv->hwconfig.hwnumraic) {
dev_err(priv->dev, "No ring interrupt controller present!\n");
return -ENODEV;
}
/* Get supported algorithms from EIP96 transform engine */
priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
EIP197_PE_EIP96_OPTIONS(0));
/* Print single info line describing what we just detected */
dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
priv->hwconfig.ppver, priv->hwconfig.pever,
priv->hwconfig.algo_flags, priv->hwconfig.icever,
priv->hwconfig.ocever, priv->hwconfig.psever);
safexcel_configure(priv);
if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) {
/*
* Request MSI vectors for global + 1 per ring -
* or just 1 for older dev images
*/
struct pci_dev *pci_pdev = pdev;
ret = pci_alloc_irq_vectors(pci_pdev,
priv->config.rings + 1,
priv->config.rings + 1,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
return ret;
}
}
/* Register the ring IRQ handlers and configure the rings */
priv->ring = devm_kcalloc(dev, priv->config.rings,
sizeof(*priv->ring),
GFP_KERNEL);
if (!priv->ring)
return -ENOMEM;
for (i = 0; i < priv->config.rings; i++) {
char wq_name[9] = {0};
int irq;
struct safexcel_ring_irq_data *ring_irq;
ret = safexcel_init_ring_descriptors(priv,
&priv->ring[i].cdr,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
goto err_cleanup_rings;
}
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
if (!priv->ring[i].rdr_req) {
ret = -ENOMEM;
goto err_cleanup_rings;
}
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
goto err_cleanup_rings;
}
ring_irq->priv = priv;
ring_irq->ring = i;
irq = safexcel_request_ring_irq(pdev,
EIP197_IRQ_NUMBER(i, is_pci_dev),
is_pci_dev,
i,
safexcel_irq_ring,
safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
ret = irq;
goto err_cleanup_rings;
}
priv->ring[i].irq = irq;
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
INIT_WORK(&priv->ring[i].work_data.work,
safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
if (!priv->ring[i].workqueue) {
ret = -ENOMEM;
goto err_cleanup_rings;
}
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
spin_lock_init(&priv->ring[i].lock);
spin_lock_init(&priv->ring[i].queue_lock);
}
atomic_set(&priv->ring_used, 0);
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
goto err_cleanup_rings;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
goto err_cleanup_rings;
}
return 0;
err_cleanup_rings:
for (i = 0; i < priv->config.rings; i++) {
if (priv->ring[i].irq)
irq_set_affinity_hint(priv->ring[i].irq, NULL);
if (priv->ring[i].workqueue)
destroy_workqueue(priv->ring[i].workqueue);
}
return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
int i;
for (i = 0; i < priv->config.rings; i++) {
/* clear any pending interrupt */
writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* Reset the CDR base address */
writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
/* Reset the RDR base address */
writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
}
}
/* for Device Tree platform driver */
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct safexcel_crypto_priv *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(dev, "failed to get resource\n");
return PTR_ERR(priv->base);
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
ret = PTR_ERR_OR_ZERO(priv->clk);
/* The clock isn't mandatory */
if (ret != -ENOENT) {
if (ret)
return ret;
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "unable to enable clk (%d)\n", ret);
return ret;
}
}
priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
ret = PTR_ERR_OR_ZERO(priv->reg_clk);
/* The clock isn't mandatory */
if (ret != -ENOENT) {
if (ret)
goto err_core_clk;
ret = clk_prepare_enable(priv->reg_clk);
if (ret) {
dev_err(dev, "unable to enable reg clk (%d)\n", ret);
goto err_core_clk;
}
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
goto err_reg_clk;
/* Generic EIP97/EIP197 device probing */
ret = safexcel_probe_generic(pdev, priv, 0);
if (ret)
goto err_reg_clk;
return 0;
err_reg_clk:
clk_disable_unprepare(priv->reg_clk);
err_core_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
static int safexcel_remove(struct platform_device *pdev)
{
struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
int i;
safexcel_unregister_algorithms(priv);
safexcel_hw_reset_rings(priv);
clk_disable_unprepare(priv->reg_clk);
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++) {
irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
}
return 0;
}
static const struct safexcel_priv_data eip97ies_mrvl_data = {
.version = EIP97IES_MRVL,
};
static const struct safexcel_priv_data eip197b_mrvl_data = {
.version = EIP197B_MRVL,
};
static const struct safexcel_priv_data eip197d_mrvl_data = {
.version = EIP197D_MRVL,
};
static const struct safexcel_priv_data eip197_devbrd_data = {
.version = EIP197_DEVBRD,
};
static const struct safexcel_priv_data eip197c_mxl_data = {
.version = EIP197C_MXL,
.fw_little_endian = true,
};
static const struct of_device_id safexcel_of_match_table[] = {
{
.compatible = "inside-secure,safexcel-eip97ies",
.data = &eip97ies_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197b",
.data = &eip197b_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197d",
.data = &eip197d_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197c-mxl",
.data = &eip197c_mxl_data,
},
/* For backward compatibility and intended for generic use */
{
.compatible = "inside-secure,safexcel-eip97",
.data = &eip97ies_mrvl_data,
},
{
.compatible = "inside-secure,safexcel-eip197",
.data = &eip197b_mrvl_data,
},
{},
};
MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
.remove = safexcel_remove,
.driver = {
.name = "crypto-safexcel",
.of_match_table = safexcel_of_match_table,
},
};
/* PCIE devices - i.e. Inside Secure development boards */
static int safexcel_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct safexcel_crypto_priv *priv;
void __iomem *pciebase;
int rc;
u32 val;
dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
ent->vendor, ent->device, ent->subvendor,
ent->subdevice, ent->driver_data);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->data = (struct safexcel_priv_data *)ent->driver_data;
pci_set_drvdata(pdev, priv);
/* enable the device */
rc = pcim_enable_device(pdev);
if (rc) {
dev_err(dev, "Failed to enable PCI device\n");
return rc;
}
/* take ownership of PCI BAR0 */
rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
if (rc) {
dev_err(dev, "Failed to map IO region for BAR0\n");
return rc;
}
priv->base = pcim_iomap_table(pdev)[0];
if (priv->data->version == EIP197_DEVBRD) {
dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
if (rc) {
dev_err(dev, "Failed to map IO region for BAR4\n");
return rc;
}
pciebase = pcim_iomap_table(pdev)[2];
val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
(val & 0xff));
/* Setup MSI identity map mapping */
writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
/* Enable all device interrupts */
writel(GENMASK(31, 0),
pciebase + EIP197_XLX_USER_INT_ENB_MSK);
} else {
dev_err(dev, "Unrecognised IRQ block identifier %x\n",
val);
return -ENODEV;
}
/* HW reset FPGA dev board */
/* assert reset */
writel(1, priv->base + EIP197_XLX_GPIO_BASE);
wmb(); /* maintain strict ordering for accesses here */
/* deassert reset */
writel(0, priv->base + EIP197_XLX_GPIO_BASE);
wmb(); /* maintain strict ordering for accesses here */
}
/* enable bus mastering */
pci_set_master(pdev);
/* Generic EIP97/EIP197 device probing */
rc = safexcel_probe_generic(pdev, priv, 1);
return rc;
}
static void safexcel_pci_remove(struct pci_dev *pdev)
{
struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
int i;
safexcel_unregister_algorithms(priv);
for (i = 0; i < priv->config.rings; i++)
destroy_workqueue(priv->ring[i].workqueue);
safexcel_hw_reset_rings(priv);
}
static const struct pci_device_id safexcel_pci_ids[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
0x16ae, 0xc522),
.driver_data = (kernel_ulong_t)&eip197_devbrd_data,
},
{},
};
MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
static struct pci_driver safexcel_pci_driver = {
.name = "crypto-safexcel",
.id_table = safexcel_pci_ids,
.probe = safexcel_pci_probe,
.remove = safexcel_pci_remove,
};
static int __init safexcel_init(void)
{
int ret;
/* Register PCI driver */
ret = pci_register_driver(&safexcel_pci_driver);
/* Register platform driver */
if (IS_ENABLED(CONFIG_OF) && !ret) {
ret = platform_driver_register(&crypto_safexcel);
if (ret)
pci_unregister_driver(&safexcel_pci_driver);
}
return ret;
}
static void __exit safexcel_exit(void)
{
/* Unregister platform driver */
if (IS_ENABLED(CONFIG_OF))
platform_driver_unregister(&crypto_safexcel);
/* Unregister PCI driver if successfully registered before */
pci_unregister_driver(&safexcel_pci_driver);
}
module_init(safexcel_init);
module_exit(safexcel_exit);
MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_FIRMWARE("ifpp.bin");
MODULE_FIRMWARE("ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");
| linux-master | drivers/crypto/inside-secure/safexcel.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/chacha.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
#include <crypto/poly1305.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
#include <crypto/sm4.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "safexcel.h"
enum safexcel_cipher_direction {
SAFEXCEL_ENCRYPT,
SAFEXCEL_DECRYPT,
};
enum safexcel_cipher_alg {
SAFEXCEL_DES,
SAFEXCEL_3DES,
SAFEXCEL_AES,
SAFEXCEL_CHACHA20,
SAFEXCEL_SM4,
};
struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
u32 mode;
enum safexcel_cipher_alg alg;
u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
u8 aadskip;
u8 blocksz;
u32 ivmask;
u32 ctrinit;
__le32 key[16];
u32 nonce;
unsigned int key_len, xts;
/* All the below is AEAD specific */
u32 hash_alg;
u32 state_sz;
struct crypto_aead *fback;
};
struct safexcel_cipher_req {
enum safexcel_cipher_direction direction;
/* Number of result descriptors associated to the request */
unsigned int rdescs;
bool needs_inv;
int nr_src, nr_dst;
};
static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc)
{
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
/* 32 bit counter, start at 0 or 1 (big endian!) */
cdesc->control_data.token[3] =
(__force u32)cpu_to_be32(ctx->ctrinit);
return 4;
}
if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 96 bit nonce part */
memcpy(&cdesc->control_data.token[0], &iv[4], 12);
/* 32 bit counter */
cdesc->control_data.token[3] = *(u32 *)iv;
return 4;
}
cdesc->control_data.options |= ctx->ivmask;
memcpy(cdesc->control_data.token, iv, ctx->blocksz);
return ctx->blocksz / sizeof(u32);
}
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
struct safexcel_token *atoken,
u32 length)
{
struct safexcel_token *token;
int ivlen;
ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
if (ivlen == 4) {
/* No space in cdesc, instruction moves to atoken */
cdesc->additional_cdata_size = 1;
token = atoken;
} else {
/* Everything fits in cdesc */
token = (struct safexcel_token *)(cdesc->control_data.token + 2);
/* Need to pad with NOP */
eip197_noop_token(&token[1]);
}
token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token->packet_length = length;
token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
EIP197_TOKEN_STAT_LAST_HASH;
token->instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYPTO |
EIP197_TOKEN_INS_TYPE_OUTPUT;
}
static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc)
{
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
/* 32 bit counter, start at 0 or 1 (big endian!) */
cdesc->control_data.token[3] =
(__force u32)cpu_to_be32(ctx->ctrinit);
return;
}
if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
/* 96 bit IV part */
memcpy(&cdesc->control_data.token[0], iv, 12);
/* 32 bit counter, start at 0 or 1 (big endian!) */
cdesc->control_data.token[3] =
(__force u32)cpu_to_be32(ctx->ctrinit);
return;
}
/* CBC */
memcpy(cdesc->control_data.token, iv, ctx->blocksz);
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
struct safexcel_token *atoken,
enum safexcel_cipher_direction direction,
u32 cryptlen, u32 assoclen, u32 digestsize)
{
struct safexcel_token *aadref;
int atoksize = 2; /* Start with minimum size */
int assocadj = assoclen - ctx->aadskip, aadalign;
/* Always 4 dwords of embedded IV for AEAD modes */
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
if (direction == SAFEXCEL_DECRYPT)
cryptlen -= digestsize;
if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
/* Construct IV block B0 for the CBC-MAC */
u8 *final_iv = (u8 *)cdesc->control_data.token;
u8 *cbcmaciv = (u8 *)&atoken[1];
__le32 *aadlen = (__le32 *)&atoken[5];
if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
/* Length + nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* Fixup flags byte */
*(__le32 *)cbcmaciv =
cpu_to_le32(ctx->nonce |
((assocadj > 0) << 6) |
((digestsize - 2) << 2));
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
memcpy(cbcmaciv + 4, iv, 8);
/* Start counter at 0 */
cdesc->control_data.token[3] = 0;
/* Message length */
*(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
} else {
/* Variable length IV part */
memcpy(final_iv, iv, 15 - iv[0]);
memcpy(cbcmaciv, iv, 15 - iv[0]);
/* Start variable length counter at 0 */
memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
/* fixup flags byte */
cbcmaciv[0] |= ((assocadj > 0) << 6) |
((digestsize - 2) << 2);
/* insert lower 2 bytes of message length */
cbcmaciv[14] = cryptlen >> 8;
cbcmaciv[15] = cryptlen & 255;
}
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
atoken->packet_length = AES_BLOCK_SIZE +
((assocadj > 0) << 1);
atoken->stat = 0;
atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
EIP197_TOKEN_INS_TYPE_HASH;
if (likely(assocadj)) {
*aadlen = cpu_to_le32((assocadj >> 8) |
(assocadj & 255) << 8);
atoken += 6;
atoksize += 7;
} else {
atoken += 5;
atoksize += 6;
}
/* Process AAD data */
aadref = atoken;
atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
atoken->packet_length = assocadj;
atoken->stat = 0;
atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
atoken++;
/* For CCM only, align AAD data towards hash engine */
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
aadalign = (assocadj + 2) & 15;
atoken->packet_length = assocadj && aadalign ?
16 - aadalign :
0;
if (likely(cryptlen)) {
atoken->stat = 0;
atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
atoken->instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_HASH;
}
} else {
safexcel_aead_iv(ctx, iv, cdesc);
/* Process AAD data */
aadref = atoken;
atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
atoken->packet_length = assocadj;
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
atoken->instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_HASH;
}
atoken++;
if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
/* For ESP mode (and not GMAC), skip over the IV */
atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
atoken->stat = 0;
atoken->instructions = 0;
atoken++;
atoksize++;
} else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
direction == SAFEXCEL_DECRYPT)) {
/* Poly-chacha decryption needs a dummy NOP here ... */
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
atoken->packet_length = 16; /* According to Op Manual */
atoken->stat = 0;
atoken->instructions = 0;
atoken++;
atoksize++;
}
if (ctx->xcm) {
/* For GCM and CCM, obtain enc(Y0) */
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
atoken->packet_length = 0;
atoken->stat = 0;
atoken->instructions = AES_BLOCK_SIZE;
atoken++;
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
atoken->packet_length = AES_BLOCK_SIZE;
atoken->stat = 0;
atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_TYPE_CRYPTO;
atoken++;
atoksize += 2;
}
if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
/* Fixup stat field for AAD direction instruction */
aadref->stat = 0;
/* Process crypto data */
atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
atoken->packet_length = cryptlen;
if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
/* Fixup instruction field for AAD dir instruction */
aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
/* Do not send to crypt engine in case of GMAC */
atoken->instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_HASH |
EIP197_TOKEN_INS_TYPE_OUTPUT;
} else {
atoken->instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYPTO |
EIP197_TOKEN_INS_TYPE_HASH |
EIP197_TOKEN_INS_TYPE_OUTPUT;
}
cryptlen &= 15;
if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
atoken->stat = 0;
/* For CCM only, pad crypto data to the hash engine */
atoken++;
atoksize++;
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
atoken->packet_length = 16 - cryptlen;
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
}
atoken++;
atoksize++;
}
if (direction == SAFEXCEL_ENCRYPT) {
/* Append ICV */
atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
atoken->packet_length = digestsize;
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
} else {
/* Extract ICV */
atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
atoken->packet_length = digestsize;
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
atoken++;
atoksize++;
/* Verify ICV */
atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
atoken->packet_length = digestsize |
EIP197_TOKEN_HASH_RESULT_VERIFY;
atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
}
/* Fixup length of the token in the command descriptor */
cdesc->additional_cdata_size = atoksize;
}
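/*
 * skcipher AES setkey: expand the key to validate its length, flag a
 * context invalidation if the key changed while the context record may
 * still be cached by the engine, then store the key words little endian
 * in the tfm context.
 */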
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
ret = aes_expandkey(&aes, key, len);
if (ret)
return ret;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < len / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
ctx->key_len = len;
memzero_explicit(&aes, sizeof(aes));
return 0;
}
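/*
 * authenc AEAD setkey: split the blob into cipher and auth keys, pull out
 * the RFC3686 nonce for CTR based modes, validate the cipher key for the
 * selected algorithm, precompute the HMAC ipad/opad state through
 * safexcel_hmac_setkey() and finally store the cipher key, flagging a
 * context invalidation if it changed.
 */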
static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_authenc_keys keys;
struct crypto_aes_ctx aes;
int err = -EINVAL, i;
const char *alg;
if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
goto badkey;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
/* Must have at least space for the nonce here */
if (unlikely(keys.enckeylen < CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
CTR_RFC3686_NONCE_SIZE);
/* exclude the nonce here */
keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
}
/* Encryption key */
switch (ctx->alg) {
case SAFEXCEL_DES:
err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
goto badkey;
break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
goto badkey;
break;
case SAFEXCEL_AES:
err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
if (unlikely(err))
goto badkey;
break;
case SAFEXCEL_SM4:
if (unlikely(keys.enckeylen != SM4_KEY_SIZE))
goto badkey;
break;
default:
dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
goto badkey;
}
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keys.enckeylen / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) !=
((u32 *)keys.enckey)[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
/* Auth key */
switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
alg = "safexcel-sha1";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
alg = "safexcel-sha224";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
alg = "safexcel-sha256";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
alg = "safexcel-sha384";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
alg = "safexcel-sha512";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
alg = "safexcel-sm3";
break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
}
if (safexcel_hmac_setkey(&ctx->base, keys.authkey, keys.authkeylen,
alg, ctx->state_sz))
goto badkey;
/* Now copy the keys into the context */
for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
ctx->key_len = keys.enckeylen;
memzero_explicit(&keys, sizeof(keys));
return 0;
badkey:
memzero_explicit(&keys, sizeof(keys));
return err;
}
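/*
 * Fill in the control words of the (first) command descriptor: operation
 * type (encrypt/decrypt, hash-then-crypt or crypt-then-hash), context
 * record size and the cipher/hash algorithm bits derived from the tfm
 * context.
 */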
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_cipher_req *sreq,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ctrl_size = ctx->key_len / sizeof(u32);
cdesc->control_data.control1 = ctx->mode;
if (ctx->aead) {
/* Take in account the ipad+opad digests */
if (ctx->xcm) {
ctrl_size += ctx->state_sz / sizeof(u32);
cdesc->control_data.control0 =
CONTEXT_CONTROL_KEY_EN |
CONTEXT_CONTROL_DIGEST_XCM |
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
} else if (ctx->alg == SAFEXCEL_CHACHA20) {
/* Chacha20-Poly1305 */
cdesc->control_data.control0 =
CONTEXT_CONTROL_KEY_EN |
CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 |
(sreq->direction == SAFEXCEL_ENCRYPT ?
CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT :
CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN) |
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
return 0;
} else {
ctrl_size += ctx->state_sz / sizeof(u32) * 2;
cdesc->control_data.control0 =
CONTEXT_CONTROL_KEY_EN |
CONTEXT_CONTROL_DIGEST_HMAC |
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
}
if (sreq->direction == SAFEXCEL_ENCRYPT &&
(ctx->xcm == EIP197_XCM_MODE_CCM ||
ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC))
cdesc->control_data.control0 |=
CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT;
else if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |=
CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
else if (ctx->xcm == EIP197_XCM_MODE_CCM)
cdesc->control_data.control0 |=
CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN;
else
cdesc->control_data.control0 |=
CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
} else {
if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 =
CONTEXT_CONTROL_TYPE_CRYPTO_OUT |
CONTEXT_CONTROL_KEY_EN |
CONTEXT_CONTROL_SIZE(ctrl_size);
else
cdesc->control_data.control0 =
CONTEXT_CONTROL_TYPE_CRYPTO_IN |
CONTEXT_CONTROL_KEY_EN |
CONTEXT_CONTROL_SIZE(ctrl_size);
}
if (ctx->alg == SAFEXCEL_DES) {
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_DES;
} else if (ctx->alg == SAFEXCEL_3DES) {
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_3DES;
} else if (ctx->alg == SAFEXCEL_AES) {
switch (ctx->key_len >> ctx->xts) {
case AES_KEYSIZE_128:
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_AES128;
break;
case AES_KEYSIZE_192:
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_AES192;
break;
case AES_KEYSIZE_256:
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_AES256;
break;
default:
dev_err(priv->dev, "aes keysize not supported: %u\n",
ctx->key_len >> ctx->xts);
return -EINVAL;
}
} else if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20;
} else if (ctx->alg == SAFEXCEL_SM4) {
cdesc->control_data.control0 |=
CONTEXT_CONTROL_CRYPTO_ALG_SM4;
}
return 0;
}
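/*
 * Completion handler for regular cipher/AEAD requests: consume the result
 * descriptors, report the first error found, unmap the DMA scatterlists
 * and, for plain CBC encryption, copy the last output block back into
 * req->iv as the output IV.
 */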
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *async,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int cryptlen,
struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
struct skcipher_request *areq = skcipher_request_cast(async);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct safexcel_result_desc *rdesc;
int ndesc = 0;
*ret = 0;
if (unlikely(!sreq->rdescs))
return 0;
while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"cipher: result: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
break;
}
if (likely(!*ret))
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
}
safexcel_complete(priv, ring);
if (src == dst) {
if (sreq->nr_src > 0)
dma_unmap_sg(priv->dev, src, sreq->nr_src,
DMA_BIDIRECTIONAL);
} else {
if (sreq->nr_src > 0)
dma_unmap_sg(priv->dev, src, sreq->nr_src,
DMA_TO_DEVICE);
if (sreq->nr_dst > 0)
dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
DMA_FROM_DEVICE);
}
	/*
	 * Update IV in req from last crypto output block for CBC modes
	 */
if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
(sreq->direction == SAFEXCEL_ENCRYPT)) {
/* For encrypt take the last output word */
sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv,
crypto_skcipher_ivsize(skcipher),
(cryptlen -
crypto_skcipher_ivsize(skcipher)));
}
*should_complete = true;
return ndesc;
}
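/*
 * Build and queue the descriptor chains for one request: map the source and
 * destination scatterlists, refresh the key (and, for AEAD, the ipad/opad
 * digests) in the context record, emit one command descriptor per source
 * segment (or a dummy descriptor for zero-length input), attach the control
 * words and token to the first descriptor, and emit result descriptors for
 * the destination while skipping the AAD area, which is not written back.
 */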
static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct safexcel_cipher_req *sreq,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, unsigned int assoclen,
unsigned int digestsize, u8 *iv, int *commands,
int *results)
{
struct skcipher_request *areq = skcipher_request_cast(base);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc;
struct safexcel_command_desc *first_cdesc = NULL;
struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
struct scatterlist *sg;
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
sreq->nr_src = sg_nents_for_len(src, totlen_src);
if (ctx->aead) {
/*
* AEAD has auth tag appended to output for encrypt and
* removed from the output for decrypt!
*/
if (sreq->direction == SAFEXCEL_DECRYPT)
totlen_dst -= digestsize;
else
totlen_dst += digestsize;
memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
&ctx->base.ipad, ctx->state_sz);
if (!ctx->xcm)
memcpy(ctx->base.ctxr->data + (ctx->key_len +
ctx->state_sz) / sizeof(u32), &ctx->base.opad,
ctx->state_sz);
} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
(sreq->direction == SAFEXCEL_DECRYPT)) {
		/*
		 * Save IV from last crypto input block for CBC modes in decrypt
		 * direction. Need to do this first in case of an in-place
		 * operation as it will be overwritten.
		 */
sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv,
crypto_skcipher_ivsize(skcipher),
(totlen_src -
crypto_skcipher_ivsize(skcipher)));
}
sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
/*
* Remember actual input length, source buffer length may be
* updated in case of inline operation below.
*/
totlen = totlen_src;
queued = totlen_src;
if (src == dst) {
sreq->nr_src = max(sreq->nr_src, sreq->nr_dst);
sreq->nr_dst = sreq->nr_src;
if (unlikely((totlen_src || totlen_dst) &&
(sreq->nr_src <= 0))) {
dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
max(totlen_src, totlen_dst));
return -EINVAL;
}
if (sreq->nr_src > 0)
dma_map_sg(priv->dev, src, sreq->nr_src,
DMA_BIDIRECTIONAL);
} else {
if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
totlen_src);
return -EINVAL;
}
if (sreq->nr_src > 0)
dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
totlen_dst);
ret = -EINVAL;
goto unmap;
}
if (sreq->nr_dst > 0)
dma_map_sg(priv->dev, dst, sreq->nr_dst,
DMA_FROM_DEVICE);
}
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
if (!totlen) {
/*
* The EIP97 cannot deal with zero length input packets!
* So stuff a dummy command descriptor indicating a 1 byte
* (dummy) input packet, using the context record as source.
*/
first_cdesc = safexcel_add_cdesc(priv, ring,
1, 1, ctx->base.ctxr_dma,
1, 1, ctx->base.ctxr_dma,
&atoken);
if (IS_ERR(first_cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(first_cdesc);
goto cdesc_rollback;
}
n_cdesc = 1;
goto skip_cdesc;
}
/* command descriptors */
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - len),
sg_dma_address(sg), len, totlen,
ctx->base.ctxr_dma, &atoken);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
goto cdesc_rollback;
}
if (!n_cdesc)
first_cdesc = cdesc;
n_cdesc++;
queued -= len;
if (!queued)
break;
}
skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
if (ctx->aead)
safexcel_aead_token(ctx, iv, first_cdesc, atoken,
sreq->direction, cryptlen,
assoclen, digestsize);
else
safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
/* result descriptors */
for_each_sg(dst, sg, sreq->nr_dst, i) {
bool last = (i == sreq->nr_dst - 1);
u32 len = sg_dma_len(sg);
/* only allow the part of the buffer we know we need */
if (len > totlen_dst)
len = totlen_dst;
if (unlikely(!len))
break;
totlen_dst -= len;
/* skip over AAD space in buffer - not written */
if (assoclen) {
if (assoclen >= len) {
assoclen -= len;
continue;
}
rdesc = safexcel_add_rdesc(priv, ring, first, last,
sg_dma_address(sg) +
assoclen,
len - assoclen);
assoclen = 0;
} else {
rdesc = safexcel_add_rdesc(priv, ring, first, last,
sg_dma_address(sg),
len);
}
if (IS_ERR(rdesc)) {
/* No space left in the result descriptor ring */
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
if (first) {
first_rdesc = rdesc;
first = false;
}
n_rdesc++;
}
if (unlikely(first)) {
/*
* Special case: AEAD decrypt with only AAD data.
* In this case there is NO output data from the engine,
* but the engine still needs a result descriptor!
* Create a dummy one just for catching the result token.
*/
rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
if (IS_ERR(rdesc)) {
/* No space left in the result descriptor ring */
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
first_rdesc = rdesc;
n_rdesc = 1;
}
safexcel_rdr_req_set(priv, ring, first_rdesc, base);
*commands = n_cdesc;
*results = n_rdesc;
return 0;
rdesc_rollback:
for (i = 0; i < n_rdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap:
if (src == dst) {
if (sreq->nr_src > 0)
dma_unmap_sg(priv->dev, src, sreq->nr_src,
DMA_BIDIRECTIONAL);
} else {
if (sreq->nr_src > 0)
dma_unmap_sg(priv->dev, src, sreq->nr_src,
DMA_TO_DEVICE);
if (sreq->nr_dst > 0)
dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
DMA_FROM_DEVICE);
}
return ret;
}
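/*
 * Completion handler for a context invalidation request: consume the result
 * descriptors, free the context record if the tfm is being torn down,
 * otherwise migrate the context to a freshly selected ring and requeue the
 * original request so it is resent using the new context.
 */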
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_result_desc *rdesc;
int ndesc = 0, enq_ret;
*ret = 0;
if (unlikely(!sreq->rdescs))
return 0;
while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"cipher: invalidate: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
break;
}
if (likely(!*ret))
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
}
safexcel_complete(priv, ring);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
ctx->base.ctxr_dma);
*should_complete = true;
return ndesc;
}
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
*should_complete = false;
return ndesc;
}
static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *async,
bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int err;
if (sreq->needs_inv) {
sreq->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
req->dst, req->cryptlen, sreq,
should_complete, ret);
}
return err;
}
static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *async,
bool *should_complete, int *ret)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
int err;
if (sreq->needs_inv) {
sreq->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
req->dst,
req->cryptlen + crypto_aead_authsize(tfm),
sreq, should_complete, ret);
}
return err;
}
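/*
 * Emit the single command/result descriptor pair asking the engine to
 * invalidate the cached context record for this tfm.
 */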
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
*commands = 1;
*results = 1;
return 0;
}
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv) {
ret = safexcel_cipher_send_inv(async, ring, commands, results);
} else {
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
u8 input_iv[AES_BLOCK_SIZE];
/*
* Save input IV in case of CBC decrypt mode
* Will be overwritten with output IV prior to use!
*/
memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, input_iv,
commands, results);
}
sreq->rdescs = *results;
return ret;
}
static int safexcel_aead_send(struct crypto_async_request *async, int ring,
int *commands, int *results)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
sreq->rdescs = *results;
return ret;
}
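/*
 * Synchronously invalidate the context record: mark the request as an
 * invalidation, queue it on the context's current ring and wait for the
 * completion handler to report the result.
 */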
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
struct crypto_wait *result)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ring = ctx->base.ring;
int err;
ctx = crypto_tfm_ctx(base->tfm);
ctx->base.exit_inv = true;
sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
crypto_enqueue_request(&priv->ring[ring].queue, base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
err = crypto_wait_req(-EINPROGRESS, result);
if (err) {
dev_warn(priv->dev,
"cipher: sync: invalidate: completion error %d\n",
err);
return err;
}
return 0;
}
static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
DECLARE_CRYPTO_WAIT(result);
memset(req, 0, sizeof(struct skcipher_request));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
DECLARE_CRYPTO_WAIT(result);
memset(req, 0, sizeof(struct aead_request));
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
aead_request_set_tfm(req, __crypto_aead_cast(tfm));
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
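/*
 * Common encrypt/decrypt entry point: allocate the context record on first
 * use (also selecting the ring to use), turn a pending key-change flag into
 * a context invalidation request when the record may still be cached, then
 * enqueue the request and kick the ring worker.
 */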
static int safexcel_queue_req(struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
enum safexcel_cipher_direction dir)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
sreq->needs_inv = false;
sreq->direction = dir;
if (ctx->base.ctxr) {
if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(*base),
&ctx->base.ctxr_dma);
if (!ctx->base.ctxr)
return -ENOMEM;
}
ring = ctx->base.ring;
spin_lock_bh(&priv->ring[ring].queue_lock);
ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
return ret;
}
static int safexcel_encrypt(struct skcipher_request *req)
{
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
SAFEXCEL_ENCRYPT);
}
static int safexcel_decrypt(struct skcipher_request *req)
{
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
SAFEXCEL_DECRYPT);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_alg_template *tmpl =
container_of(tfm->__crt_alg, struct safexcel_alg_template,
alg.skcipher.base);
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct safexcel_cipher_req));
ctx->base.priv = tmpl->priv;
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
ctx->ctrinit = 1;
return 0;
}
static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
memzero_explicit(ctx->key, sizeof(ctx->key));
/* context not allocated, skip invalidation */
if (!ctx->base.ctxr)
return -ENOMEM;
memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
return 0;
}
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
if (safexcel_cipher_cra_exit(tfm))
return;
if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_skcipher_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "skcipher: invalidation error %d\n",
ret);
} else {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
ctx->base.ctxr_dma);
}
}
static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
if (safexcel_cipher_cra_exit(tfm))
return;
if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_aead_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "aead: invalidation error %d\n",
ret);
} else {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
ctx->base.ctxr_dma);
}
}
static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
ctx->blocksz = 0;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_AES,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_aes_ecb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_AES,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_aes_cbc_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
struct safexcel_alg_template safexcel_alg_cfb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "cfb(aes)",
.cra_driver_name = "safexcel-cfb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_aes_cfb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
struct safexcel_alg_template safexcel_alg_ofb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ofb(aes)",
.cra_driver_name = "safexcel-ofb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_aes_ofb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
unsigned int keylen;
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
/* exclude the nonce here */
keylen = len - CTR_RFC3686_NONCE_SIZE;
ret = aes_expandkey(&aes, key, keylen);
if (ret)
return ret;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < keylen / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
ctx->key_len = keylen;
memzero_explicit(&aes, sizeof(aes));
return 0;
}
static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
struct safexcel_alg_template safexcel_alg_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_AES,
.alg.skcipher = {
.setkey = safexcel_skcipher_aesctr_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
/* Add nonce size */
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.base = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "safexcel-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_aes_ctr_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
if (ret)
return ret;
	/* if context exists and key changed, need to invalidate it */
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
memcpy(ctx->key, key, len);
ctx->key_len = len;
return 0;
}
static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
struct safexcel_alg_template safexcel_alg_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_DES,
.alg.skcipher = {
.setkey = safexcel_des_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.base = {
.cra_name = "cbc(des)",
.cra_driver_name = "safexcel-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_des_cbc_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
ctx->blocksz = 0;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_ecb_des = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_DES,
.alg.skcipher = {
.setkey = safexcel_des_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "safexcel-ecb-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_des_ecb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
if (err)
return err;
	/* if context exists and key changed, need to invalidate it */
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
memcpy(ctx->key, key, len);
ctx->key_len = len;
return 0;
}
static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_DES,
.alg.skcipher = {
.setkey = safexcel_des3_ede_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "safexcel-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_des3_cbc_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
ctx->blocksz = 0;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_DES,
.alg.skcipher = {
.setkey = safexcel_des3_ede_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "safexcel-ecb-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_des3_ecb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
}
static int safexcel_aead_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
}
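/*
 * Common AEAD tfm init: defaults to AES-CBC with a 4-word IV and a counter
 * start value of 1; the per-algorithm init functions below override the
 * cipher, block size, mode and hash where needed.
 */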
static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_alg_template *tmpl =
container_of(tfm->__crt_alg, struct safexcel_alg_template,
alg.aead.base);
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
sizeof(struct safexcel_cipher_req));
ctx->base.priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
ctx->blocksz = AES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
ctx->ctrinit = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
ctx->base.handle_result = safexcel_aead_handle_result;
return 0;
}
static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha1_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha256_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha224_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
ctx->state_sz = SHA512_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha512_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
ctx->state_sz = SHA512_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha384_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha1_des3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha256_des3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha224_des3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha512_des3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
ctx->blocksz = DES3_EDE_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha384_des3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha1_des_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha256_des_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha224_des_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha512_des_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
ctx->blocksz = DES_BLOCK_SIZE;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha384_des_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha1_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha1_ctr_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha256_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha256_ctr_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha224_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha224_ctr_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha512_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
.cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha512_ctr_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sha384_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
.cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sha384_ctr_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
unsigned int keylen;
/* Check for illegal XTS keys */
ret = xts_verify_key(ctfm, key, len);
if (ret)
return ret;
/* Only half of the key data is cipher key */
keylen = (len >> 1);
ret = aes_expandkey(&aes, key, keylen);
if (ret)
return ret;
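	/* If the engine may have cached the old context record, a key change requires invalidation */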
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < keylen / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
/* The other half is the tweak key */
ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
if (ret)
return ret;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i + keylen / sizeof(u32)]) !=
aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < keylen / sizeof(u32); i++)
ctx->key[i + keylen / sizeof(u32)] =
cpu_to_le32(aes.key_enc[i]);
ctx->key_len = keylen << 1;
memzero_explicit(&aes, sizeof(aes));
return 0;
}
static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->blocksz = AES_BLOCK_SIZE;
ctx->xts = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
return 0;
}
static int safexcel_encrypt_xts(struct skcipher_request *req)
{
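	/* XTS needs at least one full AES block of payload */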
if (req->cryptlen < XTS_BLOCK_SIZE)
return -EINVAL;
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
SAFEXCEL_ENCRYPT);
}
static int safexcel_decrypt_xts(struct skcipher_request *req)
{
if (req->cryptlen < XTS_BLOCK_SIZE)
return -EINVAL;
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
SAFEXCEL_DECRYPT);
}
struct safexcel_alg_template safexcel_alg_xts_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS,
.alg.skcipher = {
.setkey = safexcel_skcipher_aesxts_setkey,
.encrypt = safexcel_encrypt_xts,
.decrypt = safexcel_decrypt_xts,
/* XTS actually uses 2 AES keys glued together */
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = XTS_BLOCK_SIZE,
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "safexcel-xts-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = XTS_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_aes_xts_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
u32 hashkey[AES_BLOCK_SIZE >> 2];
int ret, i;
ret = aes_expandkey(&aes, key, len);
if (ret) {
memzero_explicit(&aes, sizeof(aes));
return ret;
}
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < len / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
ctx->key_len = len;
/* Compute hash key by encrypting zeroes with cipher key */
memset(hashkey, 0, AES_BLOCK_SIZE);
aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey);
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
if (be32_to_cpu(ctx->base.ipad.be[i]) != hashkey[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
ctx->base.ipad.be[i] = cpu_to_be32(hashkey[i]);
memzero_explicit(hashkey, AES_BLOCK_SIZE);
memzero_explicit(&aes, sizeof(aes));
return 0;
}
static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH;
ctx->state_sz = GHASH_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_GCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
return 0;
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
{
safexcel_aead_cra_exit(tfm);
}
static int safexcel_aead_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return crypto_gcm_check_authsize(authsize);
}
struct safexcel_alg_template safexcel_alg_gcm = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
.alg.aead = {
.setkey = safexcel_aead_gcm_setkey,
.setauthsize = safexcel_aead_gcm_setauthsize,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "safexcel-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_gcm_cra_init,
.cra_exit = safexcel_aead_gcm_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
ret = aes_expandkey(&aes, key, len);
if (ret) {
memzero_explicit(&aes, sizeof(aes));
return ret;
}
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
}
}
for (i = 0; i < len / sizeof(u32); i++) {
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
ctx->base.ipad.be[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
cpu_to_be32(aes.key_enc[i]);
}
ctx->key_len = len;
ctx->state_sz = 2 * AES_BLOCK_SIZE + len;
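	/* Select the XCBC-MAC variant matching the AES key size */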
if (len == AES_KEYSIZE_192)
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
else if (len == AES_KEYSIZE_256)
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
else
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
memzero_explicit(&aes, sizeof(aes));
return 0;
}
static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
ctx->state_sz = 3 * AES_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_CCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
ctx->ctrinit = 0;
return 0;
}
static int safexcel_aead_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
/* Borrowed from crypto/ccm.c */
switch (authsize) {
case 4:
case 6:
case 8:
case 10:
case 12:
case 14:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int safexcel_ccm_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
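	/* iv[0] holds L' = L - 1; CCM only allows length field sizes L of 2..8 bytes */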
if (req->iv[0] < 1 || req->iv[0] > 7)
return -EINVAL;
return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
}
static int safexcel_ccm_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
if (req->iv[0] < 1 || req->iv[0] > 7)
return -EINVAL;
return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
}
struct safexcel_alg_template safexcel_alg_ccm = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
.alg.aead = {
.setkey = safexcel_aead_ccm_setkey,
.setauthsize = safexcel_aead_ccm_setauthsize,
.encrypt = safexcel_ccm_encrypt,
.decrypt = safexcel_ccm_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "safexcel-ccm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_ccm_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
const u8 *key)
{
struct safexcel_crypto_priv *priv = ctx->base.priv;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
ctx->base.needs_inv = true;
memcpy(ctx->key, key, CHACHA_KEY_SIZE);
ctx->key_len = CHACHA_KEY_SIZE;
}
static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
if (len != CHACHA_KEY_SIZE)
return -EINVAL;
safexcel_chacha20_setkey(ctx, key);
return 0;
}
static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
ctx->ctrinit = 0;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
return 0;
}
struct safexcel_alg_template safexcel_alg_chacha20 = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_CHACHA20,
.alg.skcipher = {
.setkey = safexcel_skcipher_chacha20_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.base = {
.cra_name = "chacha20",
.cra_driver_name = "safexcel-chacha20",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_chacha20_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_aead_ctx(ctfm);
if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP &&
len > EIP197_AEAD_IPSEC_NONCE_SIZE) {
/* ESP variant has nonce appended to key */
len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
ctx->nonce = *(u32 *)(key + len);
}
if (len != CHACHA_KEY_SIZE)
return -EINVAL;
safexcel_chacha20_setkey(ctx, key);
return 0;
}
static int safexcel_aead_chachapoly_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
return 0;
}
static int safexcel_aead_chachapoly_crypt(struct aead_request *req,
enum safexcel_cipher_direction dir)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct aead_request *subreq = aead_request_ctx(req);
u32 key[CHACHA_KEY_SIZE / sizeof(u32) + 1];
int ret = 0;
/*
* Instead of wasting time detecting umpteen silly corner cases,
* just dump all "small" requests to the fallback implementation.
* HW would not be faster on such small requests anyway.
*/
if (likely((ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP ||
req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) &&
req->cryptlen > POLY1305_DIGEST_SIZE)) {
return safexcel_queue_req(&req->base, creq, dir);
}
/* HW cannot do full (AAD+payload) zero length, use fallback */
memcpy(key, ctx->key, CHACHA_KEY_SIZE);
if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
/* ESP variant has nonce appended to the key */
key[CHACHA_KEY_SIZE / sizeof(u32)] = ctx->nonce;
ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
CHACHA_KEY_SIZE +
EIP197_AEAD_IPSEC_NONCE_SIZE);
} else {
ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
CHACHA_KEY_SIZE);
}
if (ret) {
crypto_aead_clear_flags(aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aead, crypto_aead_get_flags(ctx->fback) &
CRYPTO_TFM_REQ_MASK);
return ret;
}
aead_request_set_tfm(subreq, ctx->fback);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
aead_request_set_ad(subreq, req->assoclen);
return (dir == SAFEXCEL_ENCRYPT) ?
crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
}
static int safexcel_aead_chachapoly_encrypt(struct aead_request *req)
{
return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_ENCRYPT);
}
static int safexcel_aead_chachapoly_decrypt(struct aead_request *req)
{
return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_DECRYPT);
}
static int safexcel_aead_fallback_cra_init(struct crypto_tfm *tfm)
{
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
/* Allocate fallback implementation */
ctx->fback = crypto_alloc_aead(alg->base.cra_name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fback))
return PTR_ERR(ctx->fback);
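	/* Request context must be large enough for both the HW request and the fallback subrequest */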
crypto_aead_set_reqsize(aead, max(sizeof(struct safexcel_cipher_req),
sizeof(struct aead_request) +
crypto_aead_reqsize(ctx->fback)));
return 0;
}
static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
ctx->ctrinit = 0;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
ctx->state_sz = 0; /* Precomputed by HW */
return 0;
}
static void safexcel_aead_fallback_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_aead(ctx->fback);
safexcel_aead_cra_exit(tfm);
}
struct safexcel_alg_template safexcel_alg_chachapoly = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
.alg.aead = {
.setkey = safexcel_aead_chachapoly_setkey,
.setauthsize = safexcel_aead_chachapoly_setauthsize,
.encrypt = safexcel_aead_chachapoly_encrypt,
.decrypt = safexcel_aead_chachapoly_decrypt,
.ivsize = CHACHAPOLY_IV_SIZE,
.maxauthsize = POLY1305_DIGEST_SIZE,
.base = {
.cra_name = "rfc7539(chacha20,poly1305)",
.cra_driver_name = "safexcel-chacha20-poly1305",
/* +1 to put it above HW chacha + SW poly */
.cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_chachapoly_cra_init,
.cra_exit = safexcel_aead_fallback_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = safexcel_aead_chachapoly_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
.alg.aead = {
.setkey = safexcel_aead_chachapoly_setkey,
.setauthsize = safexcel_aead_chachapoly_setauthsize,
.encrypt = safexcel_aead_chachapoly_encrypt,
.decrypt = safexcel_aead_chachapoly_decrypt,
.ivsize = CHACHAPOLY_IV_SIZE - EIP197_AEAD_IPSEC_NONCE_SIZE,
.maxauthsize = POLY1305_DIGEST_SIZE,
.base = {
.cra_name = "rfc7539esp(chacha20,poly1305)",
.cra_driver_name = "safexcel-chacha20-poly1305-esp",
/* +1 to put it above HW chacha + SW poly */
.cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_chachapolyesp_cra_init,
.cra_exit = safexcel_aead_fallback_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
if (len != SM4_KEY_SIZE)
return -EINVAL;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, SM4_KEY_SIZE))
ctx->base.needs_inv = true;
memcpy(ctx->key, key, SM4_KEY_SIZE);
ctx->key_len = SM4_KEY_SIZE;
return 0;
}
static int safexcel_sm4_blk_encrypt(struct skcipher_request *req)
{
/* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
return -EINVAL;
else
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
SAFEXCEL_ENCRYPT);
}
static int safexcel_sm4_blk_decrypt(struct skcipher_request *req)
{
/* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
return -EINVAL;
else
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
SAFEXCEL_DECRYPT);
}
static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
ctx->blocksz = 0;
ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_SM4,
.alg.skcipher = {
.setkey = safexcel_skcipher_sm4_setkey,
.encrypt = safexcel_sm4_blk_encrypt,
.decrypt = safexcel_sm4_blk_decrypt,
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.base = {
.cra_name = "ecb(sm4)",
.cra_driver_name = "safexcel-ecb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_sm4_ecb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_SM4,
.alg.skcipher = {
.setkey = safexcel_skcipher_sm4_setkey,
.encrypt = safexcel_sm4_blk_encrypt,
.decrypt = safexcel_sm4_blk_decrypt,
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.base = {
.cra_name = "cbc(sm4)",
.cra_driver_name = "safexcel-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_sm4_cbc_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
.alg.skcipher = {
.setkey = safexcel_skcipher_sm4_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.base = {
.cra_name = "ofb(sm4)",
.cra_driver_name = "safexcel-ofb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_sm4_ofb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
.alg.skcipher = {
.setkey = safexcel_skcipher_sm4_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.base = {
.cra_name = "cfb(sm4)",
.cra_driver_name = "safexcel-cfb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_sm4_cfb_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
/* exclude the nonce here */
len -= CTR_RFC3686_NONCE_SIZE;
return safexcel_skcipher_sm4_setkey(ctfm, key, len);
}
static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.algo_mask = SAFEXCEL_ALG_SM4,
.alg.skcipher = {
.setkey = safexcel_skcipher_sm4ctr_setkey,
.encrypt = safexcel_encrypt,
.decrypt = safexcel_decrypt,
/* Add nonce size */
.min_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.base = {
.cra_name = "rfc3686(ctr(sm4))",
.cra_driver_name = "safexcel-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_skcipher_sm4_ctr_cra_init,
.cra_exit = safexcel_skcipher_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sm4_blk_encrypt(struct aead_request *req)
{
/* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
return -EINVAL;
return safexcel_queue_req(&req->base, aead_request_ctx(req),
SAFEXCEL_ENCRYPT);
}
static int safexcel_aead_sm4_blk_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
/* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
return -EINVAL;
return safexcel_queue_req(&req->base, aead_request_ctx(req),
SAFEXCEL_DECRYPT);
}
static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_sm4_blk_encrypt,
.decrypt = safexcel_aead_sm4_blk_decrypt,
.ivsize = SM4_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha1),cbc(sm4))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sm4cbc_sha1_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_fallback_setkey(struct crypto_aead *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
/* Keep fallback cipher synchronized */
return crypto_aead_setkey(ctx->fback, (u8 *)key, len) ?:
safexcel_aead_setkey(ctfm, key, len);
}
static int safexcel_aead_fallback_setauthsize(struct crypto_aead *ctfm,
unsigned int authsize)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
/* Keep fallback cipher synchronized */
return crypto_aead_setauthsize(ctx->fback, authsize);
}
static int safexcel_aead_fallback_crypt(struct aead_request *req,
enum safexcel_cipher_direction dir)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, ctx->fback);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
aead_request_set_ad(subreq, req->assoclen);
return (dir == SAFEXCEL_ENCRYPT) ?
crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
}
static int safexcel_aead_sm4cbc_sm3_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
/* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
return -EINVAL;
else if (req->cryptlen || req->assoclen) /* If input length > 0 only */
return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
/* HW cannot do full (AAD+payload) zero length, use fallback */
return safexcel_aead_fallback_crypt(req, SAFEXCEL_ENCRYPT);
}
static int safexcel_aead_sm4cbc_sm3_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
/* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
return -EINVAL;
else if (req->cryptlen > crypto_aead_authsize(tfm) || req->assoclen)
/* If input length > 0 only */
return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
/* HW cannot do full (AAD+payload) zero length, use fallback */
return safexcel_aead_fallback_crypt(req, SAFEXCEL_DECRYPT);
}
static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
ctx->state_sz = SM3_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
.alg.aead = {
.setkey = safexcel_aead_fallback_setkey,
.setauthsize = safexcel_aead_fallback_setauthsize,
.encrypt = safexcel_aead_sm4cbc_sm3_encrypt,
.decrypt = safexcel_aead_sm4cbc_sm3_decrypt,
.ivsize = SM4_BLOCK_SIZE,
.maxauthsize = SM3_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sm3),cbc(sm4))",
.cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sm4cbc_sm3_cra_init,
.cra_exit = safexcel_aead_fallback_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sm4ctr_sha1_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sm4cbc_sha1_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sha1),rfc3686(ctr(sm4)))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sm4ctr_sha1_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_aead_sm4ctr_sm3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_sm4cbc_sm3_cra_init(tfm);
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
.alg.aead = {
.setkey = safexcel_aead_setkey,
.encrypt = safexcel_aead_encrypt,
.decrypt = safexcel_aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SM3_DIGEST_SIZE,
.base = {
.cra_name = "authenc(hmac(sm3),rfc3686(ctr(sm4)))",
.cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_aead_sm4ctr_sm3_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
static int safexcel_rfc4106_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
len -= CTR_RFC3686_NONCE_SIZE;
return safexcel_aead_gcm_setkey(ctfm, key, len);
}
static int safexcel_rfc4106_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return crypto_rfc4106_check_authsize(authsize);
}
static int safexcel_rfc4106_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
safexcel_aead_encrypt(req);
}
static int safexcel_rfc4106_decrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
safexcel_aead_decrypt(req);
}
static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
.alg.aead = {
.setkey = safexcel_rfc4106_gcm_setkey,
.setauthsize = safexcel_rfc4106_gcm_setauthsize,
.encrypt = safexcel_rfc4106_encrypt,
.decrypt = safexcel_rfc4106_decrypt,
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "safexcel-rfc4106-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_rfc4106_gcm_cra_init,
.cra_exit = safexcel_aead_gcm_cra_exit,
},
},
};
static int safexcel_rfc4543_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize != GHASH_DIGEST_SIZE)
return -EINVAL;
return 0;
}
static int safexcel_rfc4543_gcm_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP_GMAC;
return ret;
}
struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
.alg.aead = {
.setkey = safexcel_rfc4106_gcm_setkey,
.setauthsize = safexcel_rfc4543_gcm_setauthsize,
.encrypt = safexcel_rfc4106_encrypt,
.decrypt = safexcel_rfc4106_decrypt,
.ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.base = {
.cra_name = "rfc4543(gcm(aes))",
.cra_driver_name = "safexcel-rfc4543-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_rfc4543_gcm_cra_init,
.cra_exit = safexcel_aead_gcm_cra_exit,
},
},
};
static int safexcel_rfc4309_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	/* First byte of the nonce = L' = L - 1 = 3 for RFC4309 (4 byte ctr) */
*(u8 *)&ctx->nonce = EIP197_AEAD_IPSEC_COUNTER_SIZE - 1;
/* last 3 bytes of key are the nonce! */
memcpy((u8 *)&ctx->nonce + 1, key + len -
EIP197_AEAD_IPSEC_CCM_NONCE_SIZE,
EIP197_AEAD_IPSEC_CCM_NONCE_SIZE);
len -= EIP197_AEAD_IPSEC_CCM_NONCE_SIZE;
return safexcel_aead_ccm_setkey(ctfm, key, len);
}
static int safexcel_rfc4309_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
/* Borrowed from crypto/ccm.c */
switch (authsize) {
case 8:
case 12:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int safexcel_rfc4309_ccm_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
/* Borrowed from crypto/ccm.c */
if (req->assoclen != 16 && req->assoclen != 20)
return -EINVAL;
return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
}
static int safexcel_rfc4309_ccm_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
/* Borrowed from crypto/ccm.c */
if (req->assoclen != 16 && req->assoclen != 20)
return -EINVAL;
return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
}
static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = safexcel_aead_ccm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
.alg.aead = {
.setkey = safexcel_rfc4309_ccm_setkey,
.setauthsize = safexcel_rfc4309_ccm_setauthsize,
.encrypt = safexcel_rfc4309_ccm_encrypt,
.decrypt = safexcel_rfc4309_ccm_decrypt,
.ivsize = EIP197_AEAD_IPSEC_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "rfc4309(ccm(aes))",
.cra_driver_name = "safexcel-rfc4309-ccm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
.cra_alignmask = 0,
.cra_init = safexcel_rfc4309_ccm_cra_init,
.cra_exit = safexcel_aead_cra_exit,
.cra_module = THIS_MODULE,
},
},
};
| linux-master | drivers/crypto/inside-secure/safexcel_cipher.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include "safexcel.h"
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
int i;
struct safexcel_command_desc *cdesc;
dma_addr_t atok;
/* Actual command descriptor ring */
cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
&cdr->base_dma, GFP_KERNEL);
if (!cdr->base)
return -ENOMEM;
cdr->write = cdr->base;
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
/* Command descriptor shadow ring for storing additional token data */
cdr->shoffset = priv->config.cdsh_offset;
cdr->shbase = dmam_alloc_coherent(priv->dev,
cdr->shoffset *
EIP197_DEFAULT_RING_SIZE,
&cdr->shbase_dma, GFP_KERNEL);
if (!cdr->shbase)
return -ENOMEM;
cdr->shwrite = cdr->shbase;
cdr->shbase_end = cdr->shbase + cdr->shoffset *
(EIP197_DEFAULT_RING_SIZE - 1);
/*
* Populate command descriptors with physical pointers to shadow descs.
* Note that we only need to do this once if we don't overwrite them.
*/
cdesc = cdr->base;
atok = cdr->shbase_dma;
for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
cdesc->atok_lo = lower_32_bits(atok);
cdesc->atok_hi = upper_32_bits(atok);
cdesc = (void *)cdesc + cdr->offset;
atok += cdr->shoffset;
}
rdr->offset = priv->config.rd_offset;
/* Use shoffset for result token offset here */
rdr->shoffset = priv->config.res_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
if (!rdr->base)
return -ENOMEM;
rdr->write = rdr->base;
rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
rdr->read = rdr->base;
return 0;
}
inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
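	/* Distribute requests round-robin over all configured rings */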
return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring,
bool first,
struct safexcel_token **atoken)
{
void *ptr = ring->write;
if (first)
*atoken = ring->shwrite;
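	/* Ring is full when advancing the write pointer would run into the read pointer */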
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
if (ring->write == ring->base_end) {
ring->write = ring->base;
ring->shwrite = ring->shbase;
} else {
ring->write += ring->offset;
ring->shwrite += ring->shoffset;
}
return ptr;
}
static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring,
struct result_data_desc **rtoken)
{
void *ptr = ring->write;
/* Result token at relative offset shoffset */
*rtoken = ring->write + ring->shoffset;
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
if (ring->write == ring->base_end)
ring->write = ring->base;
else
ring->write += ring->offset;
return ptr;
}
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring)
{
void *ptr = ring->read;
if (ring->write == ring->read)
return ERR_PTR(-ENOENT);
if (ring->read == ring->base_end)
ring->read = ring->base;
else
ring->read += ring->offset;
return ptr;
}
inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
int ring)
{
struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
return rdr->read;
}
inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
int ring)
{
struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
return (rdr->read - rdr->base) / rdr->offset;
}
inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
int ring,
struct safexcel_result_desc *rdesc)
{
struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
return ((void *)rdesc - rdr->base) / rdr->offset;
}
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring)
{
if (ring->write == ring->read)
return;
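	/* Step the write (and shadow write) pointers back one descriptor, wrapping if needed */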
if (ring->write == ring->base) {
ring->write = ring->base_end;
ring->shwrite = ring->shbase_end;
} else {
ring->write -= ring->offset;
ring->shwrite -= ring->shoffset;
}
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
dma_addr_t data, u32 data_len,
u32 full_data_len,
dma_addr_t context,
struct safexcel_token **atoken)
{
struct safexcel_command_desc *cdesc;
cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
first, atoken);
if (IS_ERR(cdesc))
return cdesc;
cdesc->particle_size = data_len;
cdesc->rsvd0 = 0;
cdesc->last_seg = last;
cdesc->first_seg = first;
cdesc->additional_cdata_size = 0;
cdesc->rsvd1 = 0;
cdesc->data_lo = lower_32_bits(data);
cdesc->data_hi = upper_32_bits(data);
if (first) {
/*
* Note that the length here MUST be >0 or else the EIP(1)97
* may hang. Newer EIP197 firmware actually incorporates this
* fix already, but that doesn't help the EIP97 and we may
* also be running older firmware.
*/
cdesc->control_data.packet_length = full_data_len ?: 1;
cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
EIP197_OPTION_64BIT_CTX |
EIP197_OPTION_CTX_CTRL_IN_CMD |
EIP197_OPTION_RC_AUTO;
cdesc->control_data.type = EIP197_TYPE_BCLA;
cdesc->control_data.context_lo = lower_32_bits(context) |
EIP197_CONTEXT_SMALL;
cdesc->control_data.context_hi = upper_32_bits(context);
}
return cdesc;
}
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
dma_addr_t data, u32 len)
{
struct safexcel_result_desc *rdesc;
struct result_data_desc *rtoken;
rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
&rtoken);
if (IS_ERR(rdesc))
return rdesc;
rdesc->particle_size = len;
rdesc->rsvd0 = 0;
rdesc->descriptor_overflow = 1; /* assume error */
rdesc->buffer_overflow = 1; /* assume error */
rdesc->last_seg = last;
rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
rdesc->rsvd1 = 0;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
/* Clear length in result token */
rtoken->packet_length = 0;
/* Assume errors - HW will clear if not the case */
rtoken->error_code = 0x7fff;
return rdesc;
}
| linux-master | drivers/crypto/inside-secure/safexcel_ring.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include <crypto/skcipher.h>
#include <crypto/sm3.h>
#include <crypto/internal/cipher.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include "safexcel.h"
struct safexcel_ahash_ctx {
struct safexcel_context base;
u32 alg;
u8 key_sz;
bool cbcmac;
bool do_fallback;
bool fb_init_done;
bool fb_do_setkey;
struct crypto_aes_ctx *aes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
struct shash_desc *shdesc;
};
struct safexcel_ahash_req {
bool last_req;
bool finish;
bool hmac;
bool needs_inv;
bool hmac_zlen;
bool len_is_le;
bool not_first;
bool xcbcmac;
int nents;
dma_addr_t result_dma;
u32 digest;
u8 state_sz; /* expected state size, only set once */
u8 block_sz; /* block size, only set once */
u8 digest_sz; /* output digest size, only set once */
__le32 state[SHA3_512_BLOCK_SIZE /
sizeof(__le32)] __aligned(sizeof(__le32));
u64 len;
u64 processed;
u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
};
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
return req->len - req->processed;
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
u32 input_length, u32 result_length,
bool cbcmac)
{
struct safexcel_token *token =
(struct safexcel_token *)cdesc->control_data.token;
token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token[0].packet_length = input_length;
token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
input_length &= 15;
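	/* CBC-MAC needs whole AES blocks: pad a partial final block out to 16 bytes */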
if (unlikely(cbcmac && input_length)) {
token[0].stat = 0;
token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[1].packet_length = 16 - input_length;
token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
eip197_noop_token(&token[1]);
}
token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
token[2].packet_length = result_length;
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
eip197_noop_token(&token[3]);
}
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->base.priv;
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
cdesc->control_data.control1 = 0;
/*
	 * Copy the input digest if needed, and set up the context
	 * fields. Do this now as we need it to set up the first command
* descriptor.
*/
if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
if (req->xcbcmac)
memcpy(ctx->base.ctxr->data, &ctx->base.ipad, ctx->key_sz);
else
memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
if (!req->finish && req->xcbcmac)
cdesc->control_data.control0 |=
CONTEXT_CONTROL_DIGEST_XCM |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_NO_FINISH_HASH |
CONTEXT_CONTROL_SIZE(req->state_sz /
sizeof(u32));
else
cdesc->control_data.control0 |=
CONTEXT_CONTROL_DIGEST_XCM |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_SIZE(req->state_sz /
sizeof(u32));
return;
} else if (!req->processed) {
/* First - and possibly only - block of basic hash only */
if (req->finish)
cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
				/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
else
cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
CONTEXT_CONTROL_NO_FINISH_HASH |
				/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
return;
}
/* Hash continuation or HMAC, setup (inner) digest from state */
memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
if (req->finish) {
/* Compute digest count for hash/HMAC finish operations */
if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
req->hmac_zlen || (req->processed != req->block_sz)) {
count = req->processed / EIP197_COUNTER_BLOCK_SIZE;
/* This is a hardware limitation, as the
			 * counter must fit into a u32. This represents
* a fairly big amount of input data, so we
* shouldn't see this.
*/
if (unlikely(count & 0xffffffff00000000ULL)) {
dev_warn(priv->dev,
"Input data is too big\n");
return;
}
}
if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
/* Special case: zero length HMAC */
req->hmac_zlen ||
/* PE HW < 4.4 cannot do HMAC continue, fake using hash */
(req->processed != req->block_sz)) {
/* Basic hash continue operation, need digest + cnt */
cdesc->control_data.control0 |=
CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
/* For zero-len HMAC, don't finalize, already padded! */
if (req->hmac_zlen)
cdesc->control_data.control0 |=
CONTEXT_CONTROL_NO_FINISH_HASH;
cdesc->control_data.control1 |=
CONTEXT_CONTROL_DIGEST_CNT;
ctx->base.ctxr->data[req->state_sz >> 2] =
cpu_to_le32(count);
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
/* Clear zero-length HMAC flag for next operation! */
req->hmac_zlen = false;
} else { /* HMAC */
/* Need outer digest for HMAC finalization */
memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
&ctx->base.opad, req->state_sz);
/* Single pass HMAC - no digest count */
cdesc->control_data.control0 |=
CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_DIGEST_HMAC;
}
} else { /* Hash continuation, do not finish yet */
cdesc->control_data.control0 |=
CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_NO_FINISH_HASH;
}
}
static int safexcel_ahash_enqueue(struct ahash_request *areq);
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *async,
bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx_dma(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
u64 cache_len;
*ret = 0;
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"hash: result: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
} else {
*ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
sreq->nents = 0;
}
if (sreq->result_dma) {
dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz,
DMA_FROM_DEVICE);
sreq->result_dma = 0;
}
if (sreq->cache_dma) {
dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
DMA_TO_DEVICE);
sreq->cache_dma = 0;
sreq->cache_sz = 0;
}
if (sreq->finish) {
if (sreq->hmac &&
(sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
/* Faking HMAC using hash - need to do outer hash */
memcpy(sreq->cache, sreq->state,
crypto_ahash_digestsize(ahash));
memcpy(sreq->state, &ctx->base.opad, sreq->digest_sz);
sreq->len = sreq->block_sz +
crypto_ahash_digestsize(ahash);
sreq->processed = sreq->block_sz;
sreq->hmac = 0;
if (priv->flags & EIP197_TRC_CACHE)
ctx->base.needs_inv = true;
areq->nbytes = 0;
safexcel_ahash_enqueue(areq);
*should_complete = false; /* Not done yet */
return 1;
}
if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
/* Undo final XOR with 0xffffffff ...*/
*(__le32 *)areq->result = ~sreq->state[0];
} else {
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
}
}
cache_len = safexcel_queued_len(sreq);
if (cache_len)
memcpy(sreq->cache, sreq->cache_next, cache_len);
*should_complete = true;
return 1;
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
struct safexcel_token *dmmy;
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
u64 queued, len;
queued = safexcel_queued_len(req);
if (queued <= HASH_CACHE_SIZE)
cache_len = queued;
else
cache_len = queued - areq->nbytes;
if (!req->finish && !req->last_req) {
/* If this is not the last request and the queued data does not
* fit into full cache blocks, cache it for the next send call.
*/
extra = queued & (HASH_CACHE_SIZE - 1);
/* If this is not the last request and the queued data
* is a multiple of a block, cache the last one for now.
*/
if (!extra)
extra = HASH_CACHE_SIZE;
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
req->cache_next, extra,
areq->nbytes - extra);
queued -= extra;
if (!queued) {
*commands = 0;
*results = 0;
return 0;
}
extra = 0;
}
if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) {
if (unlikely(cache_len < AES_BLOCK_SIZE)) {
			/*
			 * Cache contains less than 1 full block: pad it out
			 * to a complete AES block, borrowing bytes from the
			 * new data if any follows.
			 */
extra = AES_BLOCK_SIZE - cache_len;
if (queued > cache_len) {
/* More data follows: borrow bytes */
u64 tmp = queued - cache_len;
skip = min_t(u64, tmp, extra);
sg_pcopy_to_buffer(areq->src,
sg_nents(areq->src),
req->cache + cache_len,
skip, 0);
}
extra -= skip;
memset(req->cache + cache_len + skip, 0, extra);
if (!ctx->cbcmac && extra) {
				// 10* padding for XCBCMAC & CMAC
				req->cache[cache_len + skip] = 0x80;
				// HW will use K2 instead of K3 - compensate!
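				/*
				 * The engine XORs K2 into what it sees as a
				 * final complete block; since this block was
				 * really partial and padded by hand, pre-XOR
				 * K2 ^ K3 so that K2 cancels out and K3 is
				 * applied instead.
				 */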
for (i = 0; i < AES_BLOCK_SIZE / 4; i++) {
u32 *cache = (void *)req->cache;
u32 *ipad = ctx->base.ipad.word;
u32 x;
x = ipad[i] ^ ipad[i + 4];
cache[i] ^= swab32(x);
}
}
cache_len = AES_BLOCK_SIZE;
queued = queued + extra;
}
/* XCBC continue: XOR previous result into 1st word */
crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE);
}
len = queued;
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
cache_len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->dev, req->cache_dma))
return -EINVAL;
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
req->cache_dma, cache_len,
len, ctx->base.ctxr_dma,
&dmmy);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
}
n_cdesc++;
queued -= cache_len;
if (!queued)
goto send_command;
}
/* Now handle the current ahash request buffer(s) */
req->nents = dma_map_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src,
areq->nbytes),
DMA_TO_DEVICE);
if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
if (unlikely(sglen <= skip)) {
skip -= sglen;
continue;
}
/* Do not overflow the request */
if ((queued + skip) <= sglen)
sglen = queued;
else
sglen -= skip;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
sg_dma_address(sg) + skip, sglen,
len, ctx->base.ctxr_dma, &dmmy);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
}
if (!n_cdesc)
first_cdesc = cdesc;
n_cdesc++;
queued -= sglen;
if (!queued)
break;
skip = 0;
}
send_command:
/* Setup the context options */
safexcel_context_control(ctx, req, first_cdesc);
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac);
req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, req->result_dma)) {
ret = -EINVAL;
goto unmap_sg;
}
/* Add a result descriptor */
rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
req->digest_sz);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
goto unmap_result;
}
safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
req->processed += len - extra;
*commands = n_cdesc;
*results = 1;
return 0;
unmap_result:
dma_unmap_single(priv->dev, req->result_dma, req->digest_sz,
DMA_FROM_DEVICE);
unmap_sg:
if (req->nents) {
dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
req->nents = 0;
}
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
if (req->cache_dma) {
dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
DMA_TO_DEVICE);
req->cache_dma = 0;
req->cache_sz = 0;
}
return ret;
}
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *async,
bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
int enq_ret;
*ret = 0;
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"hash: invalidate: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
} else {
*ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
ctx->base.ctxr_dma);
*should_complete = true;
return 1;
}
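	/*
	 * Context record was invalidated but the tfm lives on: pick a ring
	 * and re-queue the original request so it gets processed normally.
	 */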
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
*should_complete = false;
return 1;
}
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *async,
bool *should_complete, int *ret)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
int err;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
if (req->needs_inv) {
req->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async,
should_complete, ret);
}
return err;
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
ret = safexcel_invalidate_cache(async, ctx->base.priv,
ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
*commands = 1;
*results = 1;
return 0;
}
static int safexcel_ahash_send(struct crypto_async_request *async,
int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
int ret;
if (req->needs_inv)
ret = safexcel_ahash_send_inv(async, ring, commands, results);
else
ret = safexcel_ahash_send_req(async, ring, commands, results);
return ret;
}
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req);
DECLARE_CRYPTO_WAIT(result);
int ring = ctx->base.ring;
int err;
memset(req, 0, EIP197_AHASH_REQ_SIZE);
/* create invalidation request */
init_completion(&result.completion);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
rctx->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
err = crypto_wait_req(-EINPROGRESS, &result);
if (err) {
dev_warn(priv->dev, "hash: completion error (%d)\n", err);
return err;
}
return 0;
}
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least 1 block size worth of data in the
 * pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
u64 cache_len;
	/* cache_len: everything accepted by the driver but not sent yet, i.e.
	 * total size handled by update() - last request size - total size
	 * handled by send()
	 */
cache_len = safexcel_queued_len(req);
/*
* In case there isn't enough bytes to proceed (less than a
* block size), cache the data until we have enough.
*/
if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
req->cache + cache_len,
areq->nbytes, 0);
return 0;
}
/* We couldn't cache all the data */
return -E2BIG;
}
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
req->needs_inv = false;
if (ctx->base.ctxr) {
if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
/* invalidate for *any* non-XCBC continuation */
((req->not_first && !req->xcbcmac) ||
/* invalidate if (i)digest changed */
memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
/* invalidate for HMAC finish with odigest changed */
(req->finish && req->hmac &&
memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
&ctx->base.opad, req->state_sz))))
/*
* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
* set in other functions and we want to keep the same
* logic.
*/
ctx->base.needs_inv = true;
if (ctx->base.needs_inv) {
ctx->base.needs_inv = false;
req->needs_inv = true;
}
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(areq->base),
&ctx->base.ctxr_dma);
if (!ctx->base.ctxr)
return -ENOMEM;
}
req->not_first = true;
ring = ctx->base.ring;
spin_lock_bh(&priv->ring[ring].queue_lock);
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
return ret;
}
static int safexcel_ahash_update(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
int ret;
/* If the request is 0 length, do nothing */
if (!areq->nbytes)
return 0;
/* Add request to the cache if it fits */
ret = safexcel_ahash_cache(areq);
/* Update total request length */
req->len += areq->nbytes;
/* If not all data could fit into the cache, go process the excess.
* Also go process immediately for an HMAC IV precompute, which
* will never be finished at all, but needs to be processed anyway.
*/
if ((ret && !req->finish) || req->last_req)
return safexcel_ahash_enqueue(areq);
return 0;
}
static int safexcel_ahash_final(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
req->finish = true;
if (unlikely(!req->len && !areq->nbytes)) {
/*
* If we have an overall 0 length *hash* request:
* The HW cannot do 0 length hash, so we provide the correct
* result directly here.
*/
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
memcpy(areq->result, md5_zero_message_hash,
MD5_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
memcpy(areq->result, sha1_zero_message_hash,
SHA1_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
memcpy(areq->result, sha224_zero_message_hash,
SHA224_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
memcpy(areq->result, sha256_zero_message_hash,
SHA256_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
memcpy(areq->result, sha384_zero_message_hash,
SHA384_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
memcpy(areq->result, sha512_zero_message_hash,
SHA512_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) {
memcpy(areq->result,
EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE);
}
return 0;
} else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM &&
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
req->len == sizeof(u32) && !areq->nbytes)) {
/* Zero length CRC32 */
memcpy(areq->result, &ctx->base.ipad, sizeof(u32));
return 0;
} else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
!areq->nbytes)) {
/* Zero length CBC MAC */
memset(areq->result, 0, AES_BLOCK_SIZE);
return 0;
} else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE &&
!areq->nbytes)) {
/* Zero length (X)CBC/CMAC */
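		/*
		 * MAC of the empty message: XOR the partial-block subkey
		 * (K3 for XCBC, K2 for CMAC - both stored at word offset 4)
		 * into a 10*-padded empty block and encrypt it in software.
		 */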
int i;
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
u32 *result = (void *)areq->result;
/* K3 */
result[i] = swab32(ctx->base.ipad.word[i + 4]);
}
areq->result[0] ^= 0x80; // 10- padding
aes_encrypt(ctx->aes, areq->result, areq->result);
return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
!areq->nbytes)) {
/*
* If we have an overall 0 length *HMAC* request:
* For HMAC, we need to finalize the inner digest
* and then perform the outer hash.
*/
/* generate pad block in the cache */
/* start with a hash block of all zeroes */
memset(req->cache, 0, req->block_sz);
/* set the first byte to 0x80 to 'append a 1 bit' */
req->cache[0] = 0x80;
/* add the length in bits in the last 2 bytes */
if (req->len_is_le) {
/* Little endian length word (e.g. MD5) */
req->cache[req->block_sz-8] = (req->block_sz << 3) &
255;
req->cache[req->block_sz-7] = (req->block_sz >> 5);
} else {
/* Big endian length word (e.g. any SHA) */
req->cache[req->block_sz-2] = (req->block_sz >> 5);
req->cache[req->block_sz-1] = (req->block_sz << 3) &
255;
}
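		/*
		 * E.g. for SHA-256, block_sz = 64 so the length is 512 bits
		 * (0x0200): bytes 62/63 become 0x02 0x00. For MD5 the same
		 * value is stored little endian in bytes 56/57.
		 */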
req->len += req->block_sz; /* plus 1 hash block */
/* Set special zero-length HMAC flag */
req->hmac_zlen = true;
/* Finalize HMAC */
req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
} else if (req->hmac) {
/* Finalize HMAC */
req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
}
return safexcel_ahash_enqueue(areq);
}
static int safexcel_ahash_finup(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
req->finish = true;
safexcel_ahash_update(areq);
return safexcel_ahash_final(areq);
}
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
struct safexcel_ahash_export_state *export = out;
export->len = req->len;
export->processed = req->processed;
export->digest = req->digest;
memcpy(export->state, req->state, req->state_sz);
memcpy(export->cache, req->cache, HASH_CACHE_SIZE);
return 0;
}
static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
const struct safexcel_ahash_export_state *export = in;
int ret;
ret = crypto_ahash_init(areq);
if (ret)
return ret;
req->len = export->len;
req->processed = export->processed;
req->digest = export->digest;
memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
memcpy(req->state, export->state, req->state_sz);
return 0;
}
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_alg_template *tmpl =
container_of(__crypto_ahash_alg(tfm->__crt_alg),
struct safexcel_alg_template, alg.ahash);
ctx->base.priv = tmpl->priv;
ctx->base.send = safexcel_ahash_send;
ctx->base.handle_result = safexcel_handle_result;
ctx->fb_do_setkey = false;
crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
return 0;
}
static int safexcel_sha1_digest(struct ahash_request *areq)
{
int ret = safexcel_sha1_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
/* context not allocated, skip invalidation */
if (!ctx->base.ctxr)
return;
if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_ahash_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
} else {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
ctx->base.ctxr_dma);
}
}
struct safexcel_alg_template safexcel_alg_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA1,
.alg.ahash = {
.init = safexcel_sha1_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_sha1_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "safexcel-sha1",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, SHA1_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA1_BLOCK_SIZE;
req->processed = SHA1_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
req->hmac = true;
return 0;
}
static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_sha1_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
static int safexcel_hmac_init_pad(struct ahash_request *areq,
unsigned int blocksize, const u8 *key,
unsigned int keylen, u8 *ipad, u8 *opad)
{
DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret, i;
u8 *keydup;
if (keylen <= blocksize) {
memcpy(ipad, key, keylen);
} else {
keydup = kmemdup(key, keylen, GFP_KERNEL);
if (!keydup)
return -ENOMEM;
ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
sg_init_one(&sg, keydup, keylen);
ahash_request_set_crypt(areq, &sg, ipad, keylen);
ret = crypto_ahash_digest(areq);
ret = crypto_wait_req(ret, &result);
/* Avoid leaking */
kfree_sensitive(keydup);
if (ret)
return ret;
keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
}
memset(ipad + keylen, 0, blocksize - keylen);
memcpy(opad, ipad, blocksize);
for (i = 0; i < blocksize; i++) {
ipad[i] ^= HMAC_IPAD_VALUE;
opad[i] ^= HMAC_OPAD_VALUE;
}
return 0;
}
static int safexcel_hmac_init_iv(struct ahash_request *areq,
unsigned int blocksize, u8 *pad, void *state)
{
struct safexcel_ahash_req *req;
DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
sg_init_one(&sg, pad, blocksize);
ahash_request_set_crypt(areq, &sg, pad, blocksize);
ret = crypto_ahash_init(areq);
if (ret)
return ret;
req = ahash_request_ctx_dma(areq);
req->hmac = true;
req->last_req = true;
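	/* Hash the single pad block without finalizing, so the exported
	 * state below is an intermediate digest the engine can resume from.
	 */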
ret = crypto_ahash_update(areq);
ret = crypto_wait_req(ret, &result);
return ret ?: crypto_ahash_export(areq, state);
}
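/*
 * Compute the HMAC precomputes: hash one block of key^ipad and one block of
 * key^opad through the named base algorithm and export the intermediate
 * states, which the engine will later load as its starting digests.
 */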
static int __safexcel_hmac_setkey(const char *alg, const u8 *key,
unsigned int keylen,
void *istate, void *ostate)
{
struct ahash_request *areq;
struct crypto_ahash *tfm;
unsigned int blocksize;
u8 *ipad, *opad;
int ret;
tfm = crypto_alloc_ahash(alg, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
areq = ahash_request_alloc(tfm, GFP_KERNEL);
if (!areq) {
ret = -ENOMEM;
goto free_ahash;
}
crypto_ahash_clear_flags(tfm, ~0);
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
ipad = kcalloc(2, blocksize, GFP_KERNEL);
if (!ipad) {
ret = -ENOMEM;
goto free_request;
}
opad = ipad + blocksize;
ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
if (ret)
goto free_ipad;
ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
if (ret)
goto free_ipad;
ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
free_ipad:
kfree(ipad);
free_request:
ahash_request_free(areq);
free_ahash:
crypto_free_ahash(tfm);
return ret;
}
int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
unsigned int keylen, const char *alg,
unsigned int state_sz)
{
struct safexcel_crypto_priv *priv = base->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret;
ret = __safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
if (ret)
return ret;
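	/* If the precomputes change while a context record already exists,
	 * that record (and any copy cached in the TRC) must be invalidated
	 * before it can be reused with the new key.
	 */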
if (priv->flags & EIP197_TRC_CACHE && base->ctxr &&
(memcmp(&base->ipad, istate.state, state_sz) ||
memcmp(&base->opad, ostate.state, state_sz)))
base->needs_inv = true;
memcpy(&base->ipad, &istate.state, state_sz);
memcpy(&base->opad, &ostate.state, state_sz);
return 0;
}
static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen, const char *alg,
unsigned int state_sz)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
return safexcel_hmac_setkey(&ctx->base, key, keylen, alg, state_sz);
}
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
SHA1_DIGEST_SIZE);
}
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA1,
.alg.ahash = {
.init = safexcel_hmac_sha1_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_sha1_digest,
.setkey = safexcel_hmac_sha1_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "safexcel-hmac-sha1",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha256_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
}
static int safexcel_sha256_digest(struct ahash_request *areq)
{
int ret = safexcel_sha256_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_256,
.alg.ahash = {
.init = safexcel_sha256_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_sha256_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "safexcel-sha256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha224_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
}
static int safexcel_sha224_digest(struct ahash_request *areq)
{
int ret = safexcel_sha224_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_256,
.alg.ahash = {
.init = safexcel_sha224_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_sha224_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "safexcel-sha224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
SHA256_DIGEST_SIZE);
}
static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA256_BLOCK_SIZE;
req->processed = SHA256_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
return 0;
}
static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_sha224_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_256,
.alg.ahash = {
.init = safexcel_hmac_sha224_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_sha224_digest,
.setkey = safexcel_hmac_sha224_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "safexcel-hmac-sha224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
SHA256_DIGEST_SIZE);
}
static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA256_BLOCK_SIZE;
req->processed = SHA256_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
return 0;
}
static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_sha256_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_256,
.alg.ahash = {
.init = safexcel_hmac_sha256_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_sha256_digest,
.setkey = safexcel_hmac_sha256_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "safexcel-hmac-sha256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha512_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
}
static int safexcel_sha512_digest(struct ahash_request *areq)
{
int ret = safexcel_sha512_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha512 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_512,
.alg.ahash = {
.init = safexcel_sha512_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_sha512_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "safexcel-sha512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha384_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
}
static int safexcel_sha384_digest(struct ahash_request *areq)
{
int ret = safexcel_sha384_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha384 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_512,
.alg.ahash = {
.init = safexcel_sha384_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_sha384_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha384",
.cra_driver_name = "safexcel-sha384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
SHA512_DIGEST_SIZE);
}
static int safexcel_hmac_sha512_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA512_BLOCK_SIZE;
req->processed = SHA512_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
return 0;
}
static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_sha512_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_512,
.alg.ahash = {
.init = safexcel_hmac_sha512_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_sha512_digest,
.setkey = safexcel_hmac_sha512_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "safexcel-hmac-sha512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
SHA512_DIGEST_SIZE);
}
static int safexcel_hmac_sha384_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA512_BLOCK_SIZE;
req->processed = SHA512_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
return 0;
}
static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_sha384_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA2_512,
.alg.ahash = {
.init = safexcel_hmac_sha384_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_sha384_digest,
.setkey = safexcel_hmac_sha384_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "safexcel-hmac-sha384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_md5_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
return 0;
}
static int safexcel_md5_digest(struct ahash_request *areq)
{
int ret = safexcel_md5_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_md5 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_MD5,
.alg.ahash = {
.init = safexcel_md5_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_md5_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "safexcel-md5",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_md5_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, MD5_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = MD5_HMAC_BLOCK_SIZE;
req->processed = MD5_HMAC_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
req->len_is_le = true; /* MD5 is little endian! ... */
req->hmac = true;
return 0;
}
static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
MD5_DIGEST_SIZE);
}
static int safexcel_hmac_md5_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_md5_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_md5 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_MD5,
.alg.ahash = {
.init = safexcel_hmac_md5_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_md5_digest,
.setkey = safexcel_hmac_md5_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "safexcel-hmac-md5",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
int ret = safexcel_ahash_cra_init(tfm);
/* Default 'key' is all zeroes */
memset(&ctx->base.ipad, 0, sizeof(u32));
return ret;
}
static int safexcel_crc32_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from loaded key */
req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
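	/*
	 * The default all-zeroes 'key' thus gives the standard 0xffffffff
	 * start value; the CRC32 special case in safexcel_handle_req_result()
	 * complements state[0] again before returning the digest.
	 */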
/* Set processed to non-zero to enable invalidation detection */
req->len = sizeof(u32);
req->processed = sizeof(u32);
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
req->digest = CONTEXT_CONTROL_DIGEST_XCM;
req->state_sz = sizeof(u32);
req->digest_sz = sizeof(u32);
req->block_sz = sizeof(u32);
return 0;
}
static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
if (keylen != sizeof(u32))
return -EINVAL;
memcpy(&ctx->base.ipad, key, sizeof(u32));
return 0;
}
static int safexcel_crc32_digest(struct ahash_request *areq)
{
return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_crc32 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = 0,
.alg.ahash = {
.init = safexcel_crc32_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_crc32_digest,
.setkey = safexcel_crc32_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = sizeof(u32),
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "crc32",
.cra_driver_name = "safexcel-crc32",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_crc32_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from loaded keys */
memcpy(req->state, &ctx->base.ipad, ctx->key_sz);
/* Set processed to non-zero to enable invalidation detection */
req->len = AES_BLOCK_SIZE;
req->processed = AES_BLOCK_SIZE;
req->digest = CONTEXT_CONTROL_DIGEST_XCM;
req->state_sz = ctx->key_sz;
req->digest_sz = AES_BLOCK_SIZE;
req->block_sz = AES_BLOCK_SIZE;
req->xcbcmac = true;
return 0;
}
static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct crypto_aes_ctx aes;
int ret, i;
ret = aes_expandkey(&aes, key, len);
if (ret)
return ret;
memset(&ctx->base.ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
ctx->base.ipad.be[i + 8] = cpu_to_be32(aes.key_enc[i]);
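	/*
	 * Engine context layout is K2 | K3 | AES key; plain CBC-MAC has no
	 * XCBC subkeys, so the first two blocks stay all-zeroes and only the
	 * key itself (big endian) is loaded at word offset 8.
	 */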
if (len == AES_KEYSIZE_192) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
} else if (len == AES_KEYSIZE_256) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
} else {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
}
ctx->cbcmac = true;
memzero_explicit(&aes, sizeof(aes));
return 0;
}
static int safexcel_cbcmac_digest(struct ahash_request *areq)
{
return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_cbcmac = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = 0,
.alg.ahash = {
.init = safexcel_cbcmac_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_cbcmac_digest,
.setkey = safexcel_cbcmac_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "cbcmac(aes)",
.cra_driver_name = "safexcel-cbcmac-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
int ret, i;
ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
/* precompute the XCBC key material */
aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
"\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
aes_encrypt(ctx->aes, (u8 *)key_tmp,
"\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE,
"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
ctx->base.ipad.word[i] = swab32(key_tmp[i]);
ret = aes_expandkey(ctx->aes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
if (ret)
return ret;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
ctx->cbcmac = false;
return 0;
}
static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_ahash_cra_init(tfm);
	ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
	/* kmalloc() returns NULL on failure, not an ERR_PTR */
	return ctx->aes ? 0 : -ENOMEM;
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
kfree(ctx->aes);
safexcel_ahash_cra_exit(tfm);
}
struct safexcel_alg_template safexcel_alg_xcbcmac = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = 0,
.alg.ahash = {
.init = safexcel_cbcmac_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_cbcmac_digest,
.setkey = safexcel_xcbcmac_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "safexcel-xcbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_xcbcmac_cra_init,
.cra_exit = safexcel_xcbcmac_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
__be64 consts[4];
u64 _const[2];
u8 msb_mask, gfmask;
int ret, i;
/* precompute the CMAC key material */
ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
for (i = 0; i < len / sizeof(u32); i++)
ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
/* code below borrowed from crypto/cmac.c */
/* encrypt the zero block */
memset(consts, 0, AES_BLOCK_SIZE);
aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts);
gfmask = 0x87;
_const[0] = be64_to_cpu(consts[1]);
_const[1] = be64_to_cpu(consts[0]);
/* gf(2^128) multiply zero-ciphertext with u and u^2 */
for (i = 0; i < 4; i += 2) {
msb_mask = ((s64)_const[1] >> 63) & gfmask;
_const[1] = (_const[1] << 1) | (_const[0] >> 63);
_const[0] = (_const[0] << 1) ^ msb_mask;
consts[i + 0] = cpu_to_be64(_const[1]);
consts[i + 1] = cpu_to_be64(_const[0]);
}
/* end of code borrowed from crypto/cmac.c */
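	/*
	 * consts[] now holds the two CMAC subkeys: K1 = 2*L and K2 = 4*L in
	 * GF(2^128), where L = E_K(0); each doubling is a left shift that
	 * XORs 0x87 into the lowest byte when the shifted-out MSB was set.
	 */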
for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
ctx->base.ipad.be[i] = cpu_to_be32(((u32 *)consts)[i]);
if (len == AES_KEYSIZE_192) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
} else if (len == AES_KEYSIZE_256) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
} else {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
}
ctx->cbcmac = false;
return 0;
}
struct safexcel_alg_template safexcel_alg_cmac = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = 0,
.alg.ahash = {
.init = safexcel_cbcmac_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_cbcmac_digest,
.setkey = safexcel_cmac_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "cmac(aes)",
.cra_driver_name = "safexcel-cmac-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_xcbcmac_cra_init,
.cra_exit = safexcel_xcbcmac_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sm3_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SM3_DIGEST_SIZE;
req->digest_sz = SM3_DIGEST_SIZE;
req->block_sz = SM3_BLOCK_SIZE;
return 0;
}
static int safexcel_sm3_digest(struct ahash_request *areq)
{
int ret = safexcel_sm3_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sm3 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SM3,
.alg.ahash = {
.init = safexcel_sm3_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_sm3_digest,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sm3",
.cra_driver_name = "safexcel-sm3",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3",
SM3_DIGEST_SIZE);
}
static int safexcel_hmac_sm3_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
memcpy(req->state, &ctx->base.ipad, SM3_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SM3_BLOCK_SIZE;
req->processed = SM3_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SM3_DIGEST_SIZE;
req->digest_sz = SM3_DIGEST_SIZE;
req->block_sz = SM3_BLOCK_SIZE;
req->hmac = true;
return 0;
}
static int safexcel_hmac_sm3_digest(struct ahash_request *areq)
{
int ret = safexcel_hmac_sm3_init(areq);
if (ret)
return ret;
return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SM3,
.alg.ahash = {
.init = safexcel_hmac_sm3_init,
.update = safexcel_ahash_update,
.final = safexcel_ahash_final,
.finup = safexcel_ahash_finup,
.digest = safexcel_hmac_sm3_digest,
.setkey = safexcel_hmac_sm3_setkey,
.export = safexcel_ahash_export,
.import = safexcel_ahash_import,
.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sm3)",
.cra_driver_name = "safexcel-hmac-sm3",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha3_224_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
req->state_sz = SHA3_224_DIGEST_SIZE;
req->digest_sz = SHA3_224_DIGEST_SIZE;
req->block_sz = SHA3_224_BLOCK_SIZE;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_sha3_fbcheck(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
int ret = 0;
if (ctx->do_fallback) {
ahash_request_set_tfm(subreq, ctx->fback);
ahash_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
ahash_request_set_crypt(subreq, req->src, req->result,
req->nbytes);
if (!ctx->fb_init_done) {
if (ctx->fb_do_setkey) {
/* Set fallback cipher HMAC key */
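				/*
				 * Rebuild the key from the two halves that
				 * the HMAC-SHA3 setkey stashed in the ipad
				 * and opad buffers, then key the fallback.
				 */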
u8 key[SHA3_224_BLOCK_SIZE];
memcpy(key, &ctx->base.ipad,
crypto_ahash_blocksize(ctx->fback) / 2);
memcpy(key +
crypto_ahash_blocksize(ctx->fback) / 2,
&ctx->base.opad,
crypto_ahash_blocksize(ctx->fback) / 2);
ret = crypto_ahash_setkey(ctx->fback, key,
crypto_ahash_blocksize(ctx->fback));
memzero_explicit(key,
crypto_ahash_blocksize(ctx->fback));
ctx->fb_do_setkey = false;
}
ret = ret ?: crypto_ahash_init(subreq);
ctx->fb_init_done = true;
}
}
return ret;
}
static int safexcel_sha3_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
}
static int safexcel_sha3_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
}
static int safexcel_sha3_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback |= !req->nbytes;
if (ctx->do_fallback)
		/* Update or export/import happened, or len 0: cannot use the HW */
return safexcel_sha3_fbcheck(req) ?:
crypto_ahash_finup(subreq);
else
return safexcel_ahash_finup(req);
}
static int safexcel_sha3_digest_fallback(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
ctx->fb_init_done = false;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq);
}
static int safexcel_sha3_224_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req);
/* HW cannot do zero length hash, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
static int safexcel_sha3_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
}
static int safexcel_sha3_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = ahash_request_ctx_dma(req);
ctx->do_fallback = true;
return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
}
static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
{
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_ahash_cra_init(tfm);
/* Allocate fallback implementation */
ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fback))
return PTR_ERR(ctx->fback);
/* Update statesize from fallback algorithm! */
crypto_hash_alg_common(ahash)->statesize =
crypto_ahash_statesize(ctx->fback);
crypto_ahash_set_reqsize_dma(
ahash, max(sizeof(struct safexcel_ahash_req),
sizeof(struct ahash_request) +
crypto_ahash_reqsize(ctx->fback)));
return 0;
}
static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_ahash(ctx->fback);
safexcel_ahash_cra_exit(tfm);
}
struct safexcel_alg_template safexcel_alg_sha3_224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_sha3_224_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_sha3_224_digest,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_224_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha3-224",
.cra_driver_name = "safexcel-sha3-224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_sha3_cra_init,
.cra_exit = safexcel_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha3_256_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
req->state_sz = SHA3_256_DIGEST_SIZE;
req->digest_sz = SHA3_256_DIGEST_SIZE;
req->block_sz = SHA3_256_BLOCK_SIZE;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_sha3_256_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req);
/* HW cannot do zero length hash, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
struct safexcel_alg_template safexcel_alg_sha3_256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_sha3_256_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_sha3_256_digest,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_256_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha3-256",
.cra_driver_name = "safexcel-sha3-256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_sha3_cra_init,
.cra_exit = safexcel_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha3_384_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
req->state_sz = SHA3_384_DIGEST_SIZE;
req->digest_sz = SHA3_384_DIGEST_SIZE;
req->block_sz = SHA3_384_BLOCK_SIZE;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_sha3_384_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req);
/* HW cannot do zero length hash, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
struct safexcel_alg_template safexcel_alg_sha3_384 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_sha3_384_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_sha3_384_digest,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_384_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "safexcel-sha3-384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_sha3_cra_init,
.cra_exit = safexcel_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_sha3_512_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
req->state_sz = SHA3_512_DIGEST_SIZE;
req->digest_sz = SHA3_512_DIGEST_SIZE;
req->block_sz = SHA3_512_BLOCK_SIZE;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_sha3_512_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req);
/* HW cannot do zero length hash, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
struct safexcel_alg_template safexcel_alg_sha3_512 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_sha3_512_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_sha3_512_digest,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_512_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "sha3-512",
.cra_driver_name = "safexcel-sha3-512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_sha3_cra_init,
.cra_exit = safexcel_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = safexcel_sha3_cra_init(tfm);
if (ret)
return ret;
/* Allocate precalc basic digest implementation */
ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->shpre))
return PTR_ERR(ctx->shpre);
ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) +
crypto_shash_descsize(ctx->shpre), GFP_KERNEL);
if (!ctx->shdesc) {
crypto_free_shash(ctx->shpre);
return -ENOMEM;
}
ctx->shdesc->tfm = ctx->shpre;
return 0;
}
static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_ahash(ctx->fback);
crypto_free_shash(ctx->shpre);
kfree(ctx->shdesc);
safexcel_ahash_cra_exit(tfm);
}
static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
int ret = 0;
if (keylen > crypto_ahash_blocksize(tfm)) {
/*
* If the key is larger than the blocksize, then hash it
* first using the precalc basic digest (shash) implementation
*/
ret = crypto_shash_digest(ctx->shdesc, key, keylen,
ctx->base.ipad.byte);
keylen = crypto_shash_digestsize(ctx->shpre);
/*
* If the digest is larger than half the blocksize, we need to
* move the rest to opad due to the way our HMAC infra works.
*/
if (keylen > crypto_ahash_blocksize(tfm) / 2)
/* Buffers overlap, need to use memmove instead of memcpy! */
memmove(&ctx->base.opad,
ctx->base.ipad.byte +
crypto_ahash_blocksize(tfm) / 2,
keylen - crypto_ahash_blocksize(tfm) / 2);
} else {
/*
* Copy the key to our ipad & opad buffers
* Note that ipad and opad each contain one half of the key,
* to match the existing HMAC driver infrastructure.
*/
if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
memcpy(&ctx->base.ipad, key, keylen);
} else {
memcpy(&ctx->base.ipad, key,
crypto_ahash_blocksize(tfm) / 2);
memcpy(&ctx->base.opad,
key + crypto_ahash_blocksize(tfm) / 2,
keylen - crypto_ahash_blocksize(tfm) / 2);
}
}
/* Pad key with zeroes */
if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
memset(ctx->base.ipad.byte + keylen, 0,
crypto_ahash_blocksize(tfm) / 2 - keylen);
memset(&ctx->base.opad, 0, crypto_ahash_blocksize(tfm) / 2);
} else {
memset(ctx->base.opad.byte + keylen -
crypto_ahash_blocksize(tfm) / 2, 0,
crypto_ahash_blocksize(tfm) - keylen);
}
/* If doing fallback, still need to set the new key! */
ctx->fb_do_setkey = true;
return ret;
}
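/*
 * Worked example added for clarity (not part of the original driver), using
 * hmac(sha3-256) where the block size is 136 bytes and each half is 68 bytes;
 * the key lengths below are illustrative assumptions:
 *
 *   keylen = 40:  ipad[0..39] = key, ipad[40..67] = 0, opad[0..67] = 0
 *   keylen = 100: ipad[0..67] = key[0..67],
 *                 opad[0..31] = key[68..99], opad[32..67] = 0
 *   keylen > 136: the key is first digested with ctx->shpre down to 32 bytes,
 *                 which then fits entirely in the ipad half
 */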
static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
memcpy(req->state, &ctx->base.ipad, SHA3_224_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_224_BLOCK_SIZE;
req->processed = SHA3_224_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
req->state_sz = SHA3_224_BLOCK_SIZE / 2;
req->digest_sz = SHA3_224_DIGEST_SIZE;
req->block_sz = SHA3_224_BLOCK_SIZE;
req->hmac = true;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_hmac_sha3_224_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_hmac_sha3_224_init(req) ?:
safexcel_ahash_finup(req);
/* HW cannot do zero length HMAC, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm)
{
return safexcel_hmac_sha3_cra_init(tfm, "sha3-224");
}
struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_hmac_sha3_224_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_hmac_sha3_224_digest,
.setkey = safexcel_hmac_sha3_setkey,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_224_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha3-224)",
.cra_driver_name = "safexcel-hmac-sha3-224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_hmac_sha3_224_cra_init,
.cra_exit = safexcel_hmac_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
memcpy(req->state, &ctx->base.ipad, SHA3_256_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_256_BLOCK_SIZE;
req->processed = SHA3_256_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
req->state_sz = SHA3_256_BLOCK_SIZE / 2;
req->digest_sz = SHA3_256_DIGEST_SIZE;
req->block_sz = SHA3_256_BLOCK_SIZE;
req->hmac = true;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_hmac_sha3_256_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_hmac_sha3_256_init(req) ?:
safexcel_ahash_finup(req);
/* HW cannot do zero length HMAC, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm)
{
return safexcel_hmac_sha3_cra_init(tfm, "sha3-256");
}
struct safexcel_alg_template safexcel_alg_hmac_sha3_256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_hmac_sha3_256_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_hmac_sha3_256_digest,
.setkey = safexcel_hmac_sha3_setkey,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_256_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha3-256)",
.cra_driver_name = "safexcel-hmac-sha3-256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_hmac_sha3_256_cra_init,
.cra_exit = safexcel_hmac_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
memcpy(req->state, &ctx->base.ipad, SHA3_384_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_384_BLOCK_SIZE;
req->processed = SHA3_384_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
req->state_sz = SHA3_384_BLOCK_SIZE / 2;
req->digest_sz = SHA3_384_DIGEST_SIZE;
req->block_sz = SHA3_384_BLOCK_SIZE;
req->hmac = true;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_hmac_sha3_384_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_hmac_sha3_384_init(req) ?:
safexcel_ahash_finup(req);
/* HW cannot do zero length HMAC, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm)
{
return safexcel_hmac_sha3_cra_init(tfm, "sha3-384");
}
struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_hmac_sha3_384_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_hmac_sha3_384_digest,
.setkey = safexcel_hmac_sha3_setkey,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_384_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha3-384)",
.cra_driver_name = "safexcel-hmac-sha3-384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_hmac_sha3_384_cra_init,
.cra_exit = safexcel_hmac_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
memcpy(req->state, &ctx->base.ipad, SHA3_512_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_512_BLOCK_SIZE;
req->processed = SHA3_512_BLOCK_SIZE;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
req->state_sz = SHA3_512_BLOCK_SIZE / 2;
req->digest_sz = SHA3_512_DIGEST_SIZE;
req->block_sz = SHA3_512_BLOCK_SIZE;
req->hmac = true;
ctx->do_fallback = false;
ctx->fb_init_done = false;
return 0;
}
static int safexcel_hmac_sha3_512_digest(struct ahash_request *req)
{
if (req->nbytes)
return safexcel_hmac_sha3_512_init(req) ?:
safexcel_ahash_finup(req);
/* HW cannot do zero length HMAC, use fallback instead */
return safexcel_sha3_digest_fallback(req);
}
static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm)
{
return safexcel_hmac_sha3_cra_init(tfm, "sha3-512");
}
struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.algo_mask = SAFEXCEL_ALG_SHA3,
.alg.ahash = {
.init = safexcel_hmac_sha3_512_init,
.update = safexcel_sha3_update,
.final = safexcel_sha3_final,
.finup = safexcel_sha3_finup,
.digest = safexcel_hmac_sha3_512_digest,
.setkey = safexcel_hmac_sha3_setkey,
.export = safexcel_sha3_export,
.import = safexcel_sha3_import,
.halg = {
.digestsize = SHA3_512_DIGEST_SIZE,
.statesize = sizeof(struct safexcel_ahash_export_state),
.base = {
.cra_name = "hmac(sha3-512)",
.cra_driver_name = "safexcel-hmac-sha3-512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_hmac_sha3_512_cra_init,
.cra_exit = safexcel_hmac_sha3_cra_exit,
.cra_module = THIS_MODULE,
},
},
},
};
| linux-master | drivers/crypto/inside-secure/safexcel_hash.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx ZynqMP AES Driver.
* Copyright (c) 2020 Xilinx Inc.
*/
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#define ZYNQMP_DMA_BIT_MASK 32U
#define ZYNQMP_AES_KEY_SIZE AES_KEYSIZE_256
#define ZYNQMP_AES_AUTH_SIZE 16U
#define ZYNQMP_KEY_SRC_SEL_KEY_LEN 1U
#define ZYNQMP_AES_BLK_SIZE 1U
#define ZYNQMP_AES_MIN_INPUT_BLK_SIZE 4U
#define ZYNQMP_AES_WORD_LEN 4U
#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
enum zynqmp_aead_op {
ZYNQMP_AES_DECRYPT = 0,
ZYNQMP_AES_ENCRYPT
};
enum zynqmp_aead_keysrc {
ZYNQMP_AES_KUP_KEY = 0,
ZYNQMP_AES_DEV_KEY,
ZYNQMP_AES_PUF_KEY
};
struct zynqmp_aead_drv_ctx {
union {
struct aead_engine_alg aead;
} alg;
struct device *dev;
struct crypto_engine *engine;
};
struct zynqmp_aead_hw_req {
u64 src;
u64 iv;
u64 key;
u64 dst;
u64 size;
u64 op;
u64 keysrc;
};
struct zynqmp_aead_tfm_ctx {
struct device *dev;
u8 key[ZYNQMP_AES_KEY_SIZE];
u8 *iv;
u32 keylen;
u32 authsize;
enum zynqmp_aead_keysrc keysrc;
struct crypto_aead *fbk_cipher;
};
struct zynqmp_aead_req_ctx {
enum zynqmp_aead_op op;
};
static int zynqmp_aes_aead_cipher(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct device *dev = tfm_ctx->dev;
struct zynqmp_aead_hw_req *hwreq;
dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
int ret;
size_t dma_size;
char *kbuf;
int err;
if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ GCM_AES_IV_SIZE;
else
dma_size = req->cryptlen + GCM_AES_IV_SIZE;
kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr_data, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
hwreq = dma_alloc_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
&dma_addr_hw_req, GFP_KERNEL);
if (!hwreq) {
dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
return -ENOMEM;
}
data_size = req->cryptlen;
scatterwalk_map_and_copy(kbuf, req->src, 0, req->cryptlen, 0);
memcpy(kbuf + data_size, req->iv, GCM_AES_IV_SIZE);
hwreq->src = dma_addr_data;
hwreq->dst = dma_addr_data;
hwreq->iv = hwreq->src + data_size;
hwreq->keysrc = tfm_ctx->keysrc;
hwreq->op = rq_ctx->op;
if (hwreq->op == ZYNQMP_AES_ENCRYPT)
hwreq->size = data_size;
else
hwreq->size = data_size - ZYNQMP_AES_AUTH_SIZE;
if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY) {
memcpy(kbuf + data_size + GCM_AES_IV_SIZE,
tfm_ctx->key, ZYNQMP_AES_KEY_SIZE);
hwreq->key = hwreq->src + data_size + GCM_AES_IV_SIZE;
} else {
hwreq->key = 0;
}
ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
if (ret) {
dev_err(dev, "ERROR: AES PM API failed\n");
err = ret;
} else if (status) {
switch (status) {
case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
dev_err(dev, "ERROR: Gcm Tag mismatch\n");
break;
case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
dev_err(dev, "ERROR: Wrong KeySrc, enable secure mode\n");
break;
case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
dev_err(dev, "ERROR: PUF is not registered\n");
break;
default:
dev_err(dev, "ERROR: Unknown error\n");
break;
}
err = -status;
} else {
if (hwreq->op == ZYNQMP_AES_ENCRYPT)
data_size = data_size + ZYNQMP_AES_AUTH_SIZE;
else
data_size = data_size - ZYNQMP_AES_AUTH_SIZE;
sg_copy_from_buffer(req->dst, sg_nents(req->dst),
kbuf, data_size);
err = 0;
}
if (kbuf) {
memzero_explicit(kbuf, dma_size);
dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
}
if (hwreq) {
memzero_explicit(hwreq, sizeof(struct zynqmp_aead_hw_req));
dma_free_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
hwreq, dma_addr_hw_req);
}
return err;
}
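/*
 * Layout note added for clarity (not in the original source): the coherent
 * buffer built above is
 *
 *   kbuf = [ cryptlen bytes of payload | 12-byte GCM IV | 32-byte KUP key? ]
 *
 * where the trailing key is appended only for the ZYNQMP_AES_KUP_KEY source;
 * hwreq->src, ->dst, ->iv and (when used) ->key are all DMA addresses into
 * this single allocation.
 */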
static int zynqmp_fallback_check(struct zynqmp_aead_tfm_ctx *tfm_ctx,
struct aead_request *req)
{
int need_fallback = 0;
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
if (tfm_ctx->authsize != ZYNQMP_AES_AUTH_SIZE)
need_fallback = 1;
if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
tfm_ctx->keylen != ZYNQMP_AES_KEY_SIZE) {
need_fallback = 1;
}
if (req->assoclen != 0 ||
req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE) {
need_fallback = 1;
}
if ((req->cryptlen % ZYNQMP_AES_WORD_LEN) != 0)
need_fallback = 1;
if (rq_ctx->op == ZYNQMP_AES_DECRYPT &&
req->cryptlen <= ZYNQMP_AES_AUTH_SIZE) {
need_fallback = 1;
}
return need_fallback;
}
static int zynqmp_handle_aes_req(struct crypto_engine *engine,
void *req)
{
struct aead_request *areq =
container_of(req, struct aead_request, base);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(areq);
struct aead_request *subreq = aead_request_ctx(req);
int need_fallback;
int err;
need_fallback = zynqmp_fallback_check(tfm_ctx, areq);
if (need_fallback) {
aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
aead_request_set_callback(subreq, areq->base.flags,
NULL, NULL);
aead_request_set_crypt(subreq, areq->src, areq->dst,
areq->cryptlen, areq->iv);
aead_request_set_ad(subreq, areq->assoclen);
if (rq_ctx->op == ZYNQMP_AES_ENCRYPT)
err = crypto_aead_encrypt(subreq);
else
err = crypto_aead_decrypt(subreq);
} else {
err = zynqmp_aes_aead_cipher(areq);
}
crypto_finalize_aead_request(engine, areq, err);
return 0;
}
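/*
 * Note added for clarity (not in the original source): the fallback path
 * above reuses the request context as a nested aead_request. This is safe
 * because zynqmp_aes_aead_init() sizes the context with
 * max(sizeof(struct zynqmp_aead_req_ctx),
 *     sizeof(struct aead_request) + crypto_aead_reqsize(fbk_cipher)),
 * so the same storage is large enough for either use.
 */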
static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct zynqmp_aead_tfm_ctx *tfm_ctx =
(struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
unsigned char keysrc;
if (keylen == ZYNQMP_KEY_SRC_SEL_KEY_LEN) {
keysrc = *key;
if (keysrc == ZYNQMP_AES_KUP_KEY ||
keysrc == ZYNQMP_AES_DEV_KEY ||
keysrc == ZYNQMP_AES_PUF_KEY) {
tfm_ctx->keysrc = (enum zynqmp_aead_keysrc)keysrc;
} else {
tfm_ctx->keylen = keylen;
}
} else {
tfm_ctx->keylen = keylen;
if (keylen == ZYNQMP_AES_KEY_SIZE) {
tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
memcpy(tfm_ctx->key, key, keylen);
}
}
tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
}
static int zynqmp_aes_aead_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct zynqmp_aead_tfm_ctx *tfm_ctx =
(struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
tfm_ctx->authsize = authsize;
return crypto_aead_setauthsize(tfm_ctx->fbk_cipher, authsize);
}
static int zynqmp_aes_aead_encrypt(struct aead_request *req)
{
struct zynqmp_aead_drv_ctx *drv_ctx;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct aead_alg *alg = crypto_aead_alg(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
rq_ctx->op = ZYNQMP_AES_ENCRYPT;
drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}
static int zynqmp_aes_aead_decrypt(struct aead_request *req)
{
struct zynqmp_aead_drv_ctx *drv_ctx;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct aead_alg *alg = crypto_aead_alg(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
rq_ctx->op = ZYNQMP_AES_DECRYPT;
drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}
static int zynqmp_aes_aead_init(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct zynqmp_aead_tfm_ctx *tfm_ctx =
(struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
struct zynqmp_aead_drv_ctx *drv_ctx;
struct aead_alg *alg = crypto_aead_alg(aead);
drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
tfm_ctx->dev = drv_ctx->dev;
tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.base.cra_name,
0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tfm_ctx->fbk_cipher)) {
pr_err("%s() Error: failed to allocate fallback for %s\n",
__func__, drv_ctx->alg.aead.base.base.cra_name);
return PTR_ERR(tfm_ctx->fbk_cipher);
}
crypto_aead_set_reqsize(aead,
max(sizeof(struct zynqmp_aead_req_ctx),
sizeof(struct aead_request) +
crypto_aead_reqsize(tfm_ctx->fbk_cipher)));
return 0;
}
static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct zynqmp_aead_tfm_ctx *tfm_ctx =
(struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
if (tfm_ctx->fbk_cipher) {
crypto_free_aead(tfm_ctx->fbk_cipher);
tfm_ctx->fbk_cipher = NULL;
}
memzero_explicit(tfm_ctx, sizeof(struct zynqmp_aead_tfm_ctx));
}
static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
.alg.aead.base = {
.setkey = zynqmp_aes_aead_setkey,
.setauthsize = zynqmp_aes_aead_setauthsize,
.encrypt = zynqmp_aes_aead_encrypt,
.decrypt = zynqmp_aes_aead_decrypt,
.init = zynqmp_aes_aead_init,
.exit = zynqmp_aes_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = ZYNQMP_AES_AUTH_SIZE,
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "xilinx-zynqmp-aes-gcm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = ZYNQMP_AES_BLK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
.cra_module = THIS_MODULE,
}
},
.alg.aead.op = {
.do_one_request = zynqmp_handle_aes_req,
},
};
static int zynqmp_aes_aead_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int err;
/* ZynqMP AES driver supports only one instance */
if (!aes_drv_ctx.dev)
aes_drv_ctx.dev = dev;
else
return -ENODEV;
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
return err;
}
aes_drv_ctx.engine = crypto_engine_alloc_init(dev, 1);
if (!aes_drv_ctx.engine) {
dev_err(dev, "Cannot alloc AES engine\n");
err = -ENOMEM;
goto err_engine;
}
err = crypto_engine_start(aes_drv_ctx.engine);
if (err) {
dev_err(dev, "Cannot start AES engine\n");
goto err_engine;
}
err = crypto_engine_register_aead(&aes_drv_ctx.alg.aead);
if (err < 0) {
dev_err(dev, "Failed to register AEAD alg.\n");
goto err_aead;
}
return 0;
err_aead:
crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
err_engine:
if (aes_drv_ctx.engine)
crypto_engine_exit(aes_drv_ctx.engine);
return err;
}
static int zynqmp_aes_aead_remove(struct platform_device *pdev)
{
crypto_engine_exit(aes_drv_ctx.engine);
crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
return 0;
}
static const struct of_device_id zynqmp_aes_dt_ids[] = {
{ .compatible = "xlnx,zynqmp-aes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
static struct platform_driver zynqmp_aes_driver = {
.probe = zynqmp_aes_aead_probe,
.remove = zynqmp_aes_aead_remove,
.driver = {
.name = "zynqmp-aes",
.of_match_table = zynqmp_aes_dt_ids,
},
};
module_platform_driver(zynqmp_aes_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/crypto/xilinx/zynqmp-aes-gcm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
*/
#include <linux/cacheflush.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha3.h>
#include <linux/crypto.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
enum zynqmp_sha_op {
ZYNQMP_SHA3_INIT = 1,
ZYNQMP_SHA3_UPDATE = 2,
ZYNQMP_SHA3_FINAL = 4,
};
struct zynqmp_sha_drv_ctx {
struct shash_alg sha3_384;
struct device *dev;
};
struct zynqmp_sha_tfm_ctx {
struct device *dev;
struct crypto_shash *fbk_tfm;
};
struct zynqmp_sha_desc_ctx {
struct shash_desc fbk_req;
};
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
struct shash_alg *alg = crypto_shash_alg(hash);
struct crypto_shash *fallback_tfm;
struct zynqmp_sha_drv_ctx *drv_ctx;
drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
tfm_ctx->dev = drv_ctx->dev;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
tfm_ctx->fbk_tfm = fallback_tfm;
hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
return 0;
}
static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
{
struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
if (tfm_ctx->fbk_tfm) {
crypto_free_shash(tfm_ctx->fbk_tfm);
tfm_ctx->fbk_tfm = NULL;
}
memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
}
static int zynqmp_sha_init(struct shash_desc *desc)
{
struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
dctx->fbk_req.tfm = tctx->fbk_tfm;
return crypto_shash_init(&dctx->fbk_req);
}
static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
return crypto_shash_update(&dctx->fbk_req, data, length);
}
static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
{
struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
return crypto_shash_final(&dctx->fbk_req, out);
}
static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
return crypto_shash_finup(&dctx->fbk_req, data, length, out);
}
static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
{
struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
dctx->fbk_req.tfm = tctx->fbk_tfm;
return crypto_shash_import(&dctx->fbk_req, in);
}
static int zynqmp_sha_export(struct shash_desc *desc, void *out)
{
struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
return crypto_shash_export(&dctx->fbk_req, out);
}
static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
unsigned int remaining_len = len;
int update_size;
int ret;
ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
if (ret)
return ret;
while (remaining_len != 0) {
memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
} else {
update_size = remaining_len;
remaining_len = 0;
}
memcpy(ubuf, data, update_size);
flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
if (ret)
return ret;
data += update_size;
}
ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
return ret;
}
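/*
 * Illustrative summary added for clarity (not part of the original file):
 * the digest path above drives the firmware in three steps, roughly
 *
 *   zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
 *   for each chunk of up to 0x1000 bytes copied into ubuf:
 *           zynqmp_pm_sha_hash(update_dma_addr, chunk_len, ZYNQMP_SHA3_UPDATE);
 *   zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
 *
 * after which the 48-byte SHA3-384 digest is copied back from fbuf.
 */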
static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
.final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
.digest = zynqmp_sha_digest,
.export = zynqmp_sha_export,
.import = zynqmp_sha_import,
.init_tfm = zynqmp_sha_init_tfm,
.exit_tfm = zynqmp_sha_exit_tfm,
.descsize = sizeof(struct zynqmp_sha_desc_ctx),
.statesize = sizeof(struct sha3_state),
.digestsize = SHA3_384_DIGEST_SIZE,
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "zynqmp-sha3-384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
}
};
static int zynqmp_sha_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int err;
u32 v;
/* Verify the hardware is present */
err = zynqmp_pm_get_api_version(&v);
if (err)
return err;
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
return err;
}
err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
if (err < 0) {
dev_err(dev, "Failed to register shash alg.\n");
return err;
}
sha3_drv_ctx.dev = dev;
platform_set_drvdata(pdev, &sha3_drv_ctx);
ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
if (!ubuf) {
err = -ENOMEM;
goto err_shash;
}
fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
if (!fbuf) {
err = -ENOMEM;
goto err_mem;
}
return 0;
err_mem:
dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
err_shash:
crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
return err;
}
static int zynqmp_sha_remove(struct platform_device *pdev)
{
sha3_drv_ctx.dev = platform_get_drvdata(pdev);
dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
return 0;
}
static struct platform_driver zynqmp_sha_driver = {
.probe = zynqmp_sha_probe,
.remove = zynqmp_sha_remove,
.driver = {
.name = "zynqmp-sha3-384",
},
};
module_platform_driver(zynqmp_sha_driver);
MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>");
| linux-master | drivers/crypto/xilinx/zynqmp-sha.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sl3516-ce-rng.c - hardware cryptographic offloader for SL3516 SoC.
*
* Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
*
* This file handles the RNG found in the SL3516 crypto engine
*/
#include "sl3516-ce.h"
#include <linux/pm_runtime.h>
#include <linux/hw_random.h>
static int sl3516_ce_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct sl3516_ce_dev *ce;
u32 *data = buf;
size_t read = 0;
int err;
ce = container_of(rng, struct sl3516_ce_dev, trng);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
ce->hwrng_stat_req++;
ce->hwrng_stat_bytes += max;
#endif
err = pm_runtime_get_sync(ce->dev);
if (err < 0) {
pm_runtime_put_noidle(ce->dev);
return err;
}
while (read < max) {
*data = readl(ce->base + IPSEC_RAND_NUM_REG);
data++;
read += 4;
}
pm_runtime_put(ce->dev);
return read;
}
int sl3516_ce_rng_register(struct sl3516_ce_dev *ce)
{
int ret;
ce->trng.name = "SL3516 Crypto Engine RNG";
ce->trng.read = sl3516_ce_rng_read;
ce->trng.quality = 700;
ret = hwrng_register(&ce->trng);
if (ret)
dev_err(ce->dev, "Fail to register the RNG\n");
return ret;
}
void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce)
{
hwrng_unregister(&ce->trng);
}
| linux-master | drivers/crypto/gemini/sl3516-ce-rng.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
*
* Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
*
* This file adds support for the AES cipher with 128, 192 and 256-bit keys
* in ECB mode.
*/
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sl3516-ce.h"
/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_dev *ce = op->ce;
struct scatterlist *in_sg;
struct scatterlist *out_sg;
struct scatterlist *sg;
if (areq->cryptlen == 0 || areq->cryptlen % 16) {
ce->fallback_mod16++;
return true;
}
/*
* Check if we have enough descriptors for TX.
* Note: each source SG consumes two TX descriptors (one control
* descriptor plus one data descriptor), hence the MAXDESC / 2 limit.
*/
if (sg_nents(areq->src) > MAXDESC / 2) {
ce->fallback_sg_count_tx++;
return true;
}
/* check if we have enough descriptors for RX */
if (sg_nents(areq->dst) > MAXDESC) {
ce->fallback_sg_count_rx++;
return true;
}
sg = areq->src;
while (sg) {
if ((sg->length % 16) != 0) {
ce->fallback_mod16++;
return true;
}
if ((sg_dma_len(sg) % 16) != 0) {
ce->fallback_mod16++;
return true;
}
if (!IS_ALIGNED(sg->offset, 16)) {
ce->fallback_align16++;
return true;
}
sg = sg_next(sg);
}
sg = areq->dst;
while (sg) {
if ((sg->length % 16) != 0) {
ce->fallback_mod16++;
return true;
}
if ((sg_dma_len(sg) % 16) != 0) {
ce->fallback_mod16++;
return true;
}
if (!IS_ALIGNED(sg->offset, 16)) {
ce->fallback_align16++;
return true;
}
sg = sg_next(sg);
}
/* source and destination need the same number of SGs, with matching lengths */
in_sg = areq->src;
out_sg = areq->dst;
while (in_sg && out_sg) {
if (in_sg->length != out_sg->length) {
ce->fallback_not_same_len++;
return true;
}
in_sg = sg_next(in_sg);
out_sg = sg_next(out_sg);
}
if (in_sg || out_sg)
return true;
return false;
}
static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sl3516_ce_alg_template *algt;
int err;
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
algt->stat_fb++;
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
areq->base.complete, areq->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (rctx->op_dir == CE_DECRYPTION)
err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
static int sl3516_ce_cipher(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_dev *ce = op->ce;
struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sl3516_ce_alg_template *algt;
struct scatterlist *sg;
unsigned int todo, len;
struct pkt_control_ecb *ecb;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
int i;
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
areq->cryptlen,
rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
op->keylen);
algt->stat_req++;
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = nr_sgs;
} else {
nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
err = -EINVAL;
goto theend_sgs;
}
}
len = areq->cryptlen;
i = 0;
sg = areq->src;
while (i < nr_sgs && sg && len) {
if (sg_dma_len(sg) == 0)
goto sgs_next;
rctx->t_src[i].addr = sg_dma_address(sg);
todo = min(len, sg_dma_len(sg));
rctx->t_src[i].len = todo;
dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
len -= todo;
i++;
sgs_next:
sg = sg_next(sg);
}
if (len > 0) {
dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
err = -EINVAL;
goto theend_sgs;
}
len = areq->cryptlen;
i = 0;
sg = areq->dst;
while (i < nr_sgd && sg && len) {
if (sg_dma_len(sg) == 0)
goto sgd_next;
rctx->t_dst[i].addr = sg_dma_address(sg);
todo = min(len, sg_dma_len(sg));
rctx->t_dst[i].len = todo;
dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
len -= todo;
i++;
sgd_next:
sg = sg_next(sg);
}
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
goto theend_sgs;
}
switch (algt->mode) {
case ECB_AES:
rctx->pctrllen = sizeof(struct pkt_control_ecb);
ecb = (struct pkt_control_ecb *)ce->pctrl;
rctx->tqflag = TQ0_TYPE_CTRL;
rctx->tqflag |= TQ1_CIPHER;
ecb->control.op_mode = rctx->op_dir;
ecb->control.cipher_algorithm = ECB_AES;
ecb->cipher.header_len = 0;
ecb->cipher.algorithm_len = areq->cryptlen;
cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
rctx->h = &ecb->cipher;
rctx->tqflag |= TQ4_KEY0;
rctx->tqflag |= TQ5_KEY4;
rctx->tqflag |= TQ6_KEY6;
ecb->control.aesnk = op->keylen / 4;
break;
}
rctx->nr_sgs = nr_sgs;
rctx->nr_sgd = nr_sgd;
err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));
theend_sgs:
if (areq->src == areq->dst) {
dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
}
theend:
return err;
}
int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sl3516_ce_cipher(breq);
local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
local_bh_enable();
return 0;
}
int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct crypto_engine *engine;
memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
rctx->op_dir = CE_DECRYPTION;
if (sl3516_ce_need_fallback(areq))
return sl3516_ce_cipher_fallback(areq);
engine = op->ce->engine;
return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct crypto_engine *engine;
memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
rctx->op_dir = CE_ENCRYPTION;
if (sl3516_ce_need_fallback(areq))
return sl3516_ce_cipher_fallback(areq);
engine = op->ce->engine;
return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sl3516_ce_alg_template *algt;
const char *name = crypto_tfm_alg_name(tfm);
struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
int err;
memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
dev_info(op->ce->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
pm_runtime_put_noidle(op->ce->dev);
crypto_free_skcipher(op->fallback_tfm);
return err;
}
void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync_suspend(op->ce->dev);
}
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_dev *ce = op->ce;
switch (keylen) {
case 128 / 8:
break;
case 192 / 8:
break;
case 256 / 8:
break;
default:
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
return -ENOMEM;
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
| linux-master | drivers/crypto/gemini/sl3516-ce-cipher.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
*
* Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
*
* Core file which registers crypto algorithms supported by the CryptoEngine
*/
#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "sl3516-ce.h"
static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
{
const size_t sz = sizeof(struct descriptor) * MAXDESC;
int i;
ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
if (!ce->tx)
return -ENOMEM;
ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
if (!ce->rx)
goto err_rx;
for (i = 0; i < MAXDESC; i++) {
ce->tx[i].frame_ctrl.bits.own = CE_CPU;
ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
}
ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;
for (i = 0; i < MAXDESC; i++) {
ce->rx[i].frame_ctrl.bits.own = CE_CPU;
ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
}
ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;
ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
&ce->dctrl, GFP_KERNEL);
if (!ce->pctrl)
goto err_pctrl;
return 0;
err_pctrl:
dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
err_rx:
dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
return -ENOMEM;
}
static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
{
const size_t sz = sizeof(struct descriptor) * MAXDESC;
dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
ce->dctrl);
}
static void start_dma_tx(struct sl3516_ce_dev *ce)
{
u32 v;
v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE | \
TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;
writel(v, ce->base + IPSEC_TXDMA_CTRL);
}
static void start_dma_rx(struct sl3516_ce_dev *ce)
{
u32 v;
v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE | \
RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH | \
RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR | \
RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;
writel(v, ce->base + IPSEC_RXDMA_CTRL);
}
static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
{
struct descriptor *dd;
dd = &ce->tx[ce->ctx];
ce->ctx++;
if (ce->ctx >= MAXDESC)
ce->ctx = 0;
return dd;
}
static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
{
struct descriptor *rdd;
rdd = &ce->rx[ce->crx];
ce->crx++;
if (ce->crx >= MAXDESC)
ce->crx = 0;
return rdd;
}
int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
const char *name)
{
struct descriptor *dd, *rdd = NULL;
u32 v;
int i, err = 0;
ce->stat_req++;
reinit_completion(&ce->complete);
ce->status = 0;
for (i = 0; i < rctx->nr_sgd; i++) {
dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
i, rctx->nr_sgd, rctx->t_dst[i].len);
rdd = get_desc_rx(ce);
rdd->buf_adr = rctx->t_dst[i].addr;
rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
rdd->frame_ctrl.bits.own = CE_DMA;
}
rdd->next_desc.bits.eofie = 1;
for (i = 0; i < rctx->nr_sgs; i++) {
dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
i, rctx->nr_sgs, rctx->t_src[i].len);
rctx->h->algorithm_len = rctx->t_src[i].len;
dd = get_desc_tx(ce);
dd->frame_ctrl.raw = 0;
dd->flag_status.raw = 0;
dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
dd->buf_adr = ce->dctrl;
dd->flag_status.tx_flag.tqflag = rctx->tqflag;
dd->next_desc.bits.eofie = 0;
dd->next_desc.bits.dec = 0;
dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
dd->frame_ctrl.bits.own = CE_DMA;
dd = get_desc_tx(ce);
dd->frame_ctrl.raw = 0;
dd->flag_status.raw = 0;
dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
dd->buf_adr = rctx->t_src[i].addr;
dd->flag_status.tx_flag.tqflag = 0;
dd->next_desc.bits.eofie = 0;
dd->next_desc.bits.dec = 0;
dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
dd->frame_ctrl.bits.own = CE_DMA;
start_dma_tx(ce);
start_dma_rx(ce);
}
wait_for_completion_interruptible_timeout(&ce->complete,
msecs_to_jiffies(5000));
if (ce->status == 0) {
dev_err(ce->dev, "DMA timeout for %s\n", name);
err = -EFAULT;
}
v = readl(ce->base + IPSEC_STATUS_REG);
if (v & 0xFFF) {
dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
err = -EFAULT;
}
return err;
}
static irqreturn_t ce_irq_handler(int irq, void *data)
{
struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
u32 v;
ce->stat_irq++;
v = readl(ce->base + IPSEC_DMA_STATUS);
writel(v, ce->base + IPSEC_DMA_STATUS);
if (v & DMA_STATUS_TS_DERR)
dev_err(ce->dev, "AHB bus Error While Tx !!!\n");
if (v & DMA_STATUS_TS_PERR)
dev_err(ce->dev, "Tx Descriptor Protocol Error !!!\n");
if (v & DMA_STATUS_RS_DERR)
dev_err(ce->dev, "AHB bus Error While Rx !!!\n");
if (v & DMA_STATUS_RS_PERR)
dev_err(ce->dev, "Rx Descriptor Protocol Error !!!\n");
if (v & DMA_STATUS_TS_EOFI)
ce->stat_irq_tx++;
if (v & DMA_STATUS_RS_EOFI) {
ce->status = 1;
complete(&ce->complete);
ce->stat_irq_rx++;
return IRQ_HANDLED;
}
return IRQ_HANDLED;
}
static struct sl3516_ce_alg_template ce_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.mode = ECB_AES,
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sl3516",
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
.cra_init = sl3516_ce_cipher_init,
.cra_exit = sl3516_ce_cipher_exit,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = sl3516_ce_aes_setkey,
.encrypt = sl3516_ce_skencrypt,
.decrypt = sl3516_ce_skdecrypt,
},
.alg.skcipher.op = {
.do_one_request = sl3516_ce_handle_cipher_request,
},
},
};
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sl3516_ce_dev *ce = seq->private;
unsigned int i;
seq_printf(seq, "HWRNG %lu %lu\n",
ce->hwrng_stat_req, ce->hwrng_stat_bytes);
seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
seq_printf(seq, "nreq %lu\n", ce->stat_req);
seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
continue;
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.skcipher.base.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
break;
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
int err;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
ce_algs[i].ce = ce;
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "DEBUG: Register %s\n",
ce_algs[i].alg.skcipher.base.base.cra_name);
err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "Fail to register %s\n",
ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
break;
default:
ce_algs[i].ce = NULL;
dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
}
}
return 0;
}
static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
continue;
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
ce_algs[i].alg.skcipher.base.base.cra_name);
crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
}
}
}
static void sl3516_ce_start(struct sl3516_ce_dev *ce)
{
ce->ctx = 0;
ce->crx = 0;
writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
writel(0, ce->base + IPSEC_DMA_STATUS);
}
/*
* Power management strategy: The device is suspended unless a TFM exists for
* one of the algorithms provided by this driver.
*/
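/*
 * Concretely (summary added for clarity, see sl3516-ce-cipher.c and
 * sl3516-ce-rng.c above): sl3516_ce_cipher_init() takes a runtime-PM
 * reference with pm_runtime_get_sync() and sl3516_ce_cipher_exit() drops it
 * with pm_runtime_put_sync_suspend(), and each RNG read is bracketed the
 * same way, so the clock and reset below are essentially only held while a
 * TFM is allocated or a read is in progress.
 */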
static int sl3516_ce_pm_suspend(struct device *dev)
{
struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
reset_control_assert(ce->reset);
clk_disable_unprepare(ce->clks);
return 0;
}
static int sl3516_ce_pm_resume(struct device *dev)
{
struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(ce->clks);
if (err) {
dev_err(ce->dev, "Cannot prepare_enable\n");
goto error;
}
err = reset_control_deassert(ce->reset);
if (err) {
dev_err(ce->dev, "Cannot deassert reset control\n");
goto error;
}
sl3516_ce_start(ce);
return 0;
error:
sl3516_ce_pm_suspend(dev);
return err;
}
static const struct dev_pm_ops sl3516_ce_pm_ops = {
SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
};
static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
{
int err;
pm_runtime_use_autosuspend(ce->dev);
pm_runtime_set_autosuspend_delay(ce->dev, 2000);
err = pm_runtime_set_suspended(ce->dev);
if (err)
return err;
pm_runtime_enable(ce->dev);
return err;
}
static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
{
pm_runtime_disable(ce->dev);
}
static int sl3516_ce_probe(struct platform_device *pdev)
{
struct sl3516_ce_dev *ce;
int err, irq;
u32 v;
ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
if (!ce)
return -ENOMEM;
ce->dev = &pdev->dev;
platform_set_drvdata(pdev, ce);
ce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ce->base))
return PTR_ERR(ce->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
if (err) {
dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
return err;
}
ce->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ce->reset))
return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
"No reset control found\n");
ce->clks = devm_clk_get(ce->dev, NULL);
if (IS_ERR(ce->clks)) {
err = PTR_ERR(ce->clks);
dev_err(ce->dev, "Cannot get clock err=%d\n", err);
return err;
}
err = sl3516_ce_desc_init(ce);
if (err)
return err;
err = sl3516_ce_pm_init(ce);
if (err)
goto error_pm;
init_completion(&ce->complete);
ce->engine = crypto_engine_alloc_init(ce->dev, true);
if (!ce->engine) {
dev_err(ce->dev, "Cannot allocate engine\n");
err = -ENOMEM;
goto error_engine;
}
err = crypto_engine_start(ce->engine);
if (err) {
dev_err(ce->dev, "Cannot start engine\n");
goto error_engine;
}
err = sl3516_ce_register_algs(ce);
if (err)
goto error_alg;
err = sl3516_ce_rng_register(ce);
if (err)
goto error_rng;
err = pm_runtime_resume_and_get(ce->dev);
if (err < 0)
goto error_pmuse;
v = readl(ce->base + IPSEC_ID);
dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
v & GENMASK(31, 4),
v & GENMASK(3, 0));
v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
v & GENMASK(15, 4),
v & GENMASK(3, 0));
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
struct dentry *dbgfs_dir __maybe_unused;
struct dentry *dbgfs_stats __maybe_unused;
/* debugfs errors are not fatal and are deliberately ignored */
dbgfs_dir = debugfs_create_dir("sl3516", NULL);
dbgfs_stats = debugfs_create_file("stats", 0444,
dbgfs_dir, ce,
&sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
ce->dbgfs_dir = dbgfs_dir;
ce->dbgfs_stats = dbgfs_stats;
#endif
}
return 0;
error_pmuse:
sl3516_ce_rng_unregister(ce);
error_rng:
sl3516_ce_unregister_algs(ce);
error_alg:
crypto_engine_exit(ce->engine);
error_engine:
sl3516_ce_pm_exit(ce);
error_pm:
sl3516_ce_free_descs(ce);
return err;
}
static int sl3516_ce_remove(struct platform_device *pdev)
{
struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);
sl3516_ce_rng_unregister(ce);
sl3516_ce_unregister_algs(ce);
crypto_engine_exit(ce->engine);
sl3516_ce_pm_exit(ce);
sl3516_ce_free_descs(ce);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
return 0;
}
static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
{ .compatible = "cortina,sl3516-crypto"},
{}
};
MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
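/*
 * Illustrative device tree node this driver would bind against. The values
 * are placeholders; only the compatible string and the set of resources
 * requested in probe() (one MMIO range, one IRQ, one clock, one reset line)
 * come from the code above:
 *
 *	crypto-engine {
 *		compatible = "cortina,sl3516-crypto";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <...>;
 *		resets = <...>;
 *	};
 */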
static struct platform_driver sl3516_ce_driver = {
.probe = sl3516_ce_probe,
.remove = sl3516_ce_remove,
.driver = {
.name = "sl3516-crypto",
.pm = &sl3516_ce_pm_ops,
.of_match_table = sl3516_ce_crypto_of_match_table,
},
};
module_platform_driver(sl3516_ce_driver);
MODULE_DESCRIPTION("SL3516 cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
| linux-master | drivers/crypto/gemini/sl3516-ce-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-gxl-core.c - hardware cryptographic offloader for Amlogic GXL SoC
*
* Copyright (C) 2018-2019 Corentin Labbe <clabbe@baylibre.com>
*
* Core file which registers crypto algorithms supported by the hardware.
*/
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "amlogic-gxl.h"
static irqreturn_t meson_irq_handler(int irq, void *data)
{
struct meson_dev *mc = (struct meson_dev *)data;
int flow;
u32 p;
for (flow = 0; flow < MAXFLOW; flow++) {
if (mc->irqs[flow] == irq) {
p = readl(mc->base + ((0x04 + flow) << 2));
if (p) {
writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2));
mc->chanlist[flow].status = 1;
complete(&mc->chanlist[flow].complete);
return IRQ_HANDLED;
}
dev_err(mc->dev, "%s %d Got irq for flow %d but ctrl is empty\n", __func__, irq, flow);
}
}
dev_err(mc->dev, "%s %d from unknown irq\n", __func__, irq);
return IRQ_HANDLED;
}
static struct meson_alg_template mc_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_CBC,
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-gxl",
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
.cra_init = meson_cipher_init,
.cra_exit = meson_cipher_exit,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
},
.alg.skcipher.op = {
.do_one_request = meson_handle_cipher_request,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_ECB,
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-gxl",
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
.cra_init = meson_cipher_init,
.cra_exit = meson_cipher_exit,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
},
.alg.skcipher.op = {
.do_one_request = meson_handle_cipher_request,
},
},
};
static int meson_debugfs_show(struct seq_file *seq, void *v)
{
struct meson_dev *mc __maybe_unused = seq->private;
int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc->chanlist[i].stat_req);
#else
0ul);
#endif
for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s %lu %lu\n",
mc_algs[i].alg.skcipher.base.base.cra_driver_name,
mc_algs[i].alg.skcipher.base.base.cra_name,
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc_algs[i].stat_req, mc_algs[i].stat_fb);
#else
0ul, 0ul);
#endif
break;
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
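/*
 * Free flows 0..i. Callers pass MAXFLOW - 1 for a full teardown, or the
 * index of the last partially initialised flow on an allocation error.
 */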
static void meson_free_chanlist(struct meson_dev *mc, int i)
{
while (i >= 0) {
crypto_engine_exit(mc->chanlist[i].engine);
if (mc->chanlist[i].tl)
dma_free_coherent(mc->dev, sizeof(struct meson_desc) * MAXDESC,
mc->chanlist[i].tl,
mc->chanlist[i].t_phy);
i--;
}
}
/*
* Allocate the channel list structure
*/
static int meson_allocate_chanlist(struct meson_dev *mc)
{
int i, err;
mc->chanlist = devm_kcalloc(mc->dev, MAXFLOW,
sizeof(struct meson_flow), GFP_KERNEL);
if (!mc->chanlist)
return -ENOMEM;
for (i = 0; i < MAXFLOW; i++) {
init_completion(&mc->chanlist[i].complete);
mc->chanlist[i].engine = crypto_engine_alloc_init(mc->dev, true);
if (!mc->chanlist[i].engine) {
dev_err(mc->dev, "Cannot allocate engine\n");
i--;
err = -ENOMEM;
goto error_engine;
}
err = crypto_engine_start(mc->chanlist[i].engine);
if (err) {
dev_err(mc->dev, "Cannot start engine\n");
goto error_engine;
}
mc->chanlist[i].tl = dma_alloc_coherent(mc->dev,
sizeof(struct meson_desc) * MAXDESC,
&mc->chanlist[i].t_phy,
GFP_KERNEL);
if (!mc->chanlist[i].tl) {
err = -ENOMEM;
goto error_engine;
}
}
return 0;
error_engine:
meson_free_chanlist(mc, i);
return err;
}
static int meson_register_algs(struct meson_dev *mc)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
mc_algs[i].mc = mc;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
if (err) {
dev_err(mc->dev, "Fail to register %s\n",
mc_algs[i].alg.skcipher.base.base.cra_name);
mc_algs[i].mc = NULL;
return err;
}
break;
}
}
return 0;
}
static void meson_unregister_algs(struct meson_dev *mc)
{
int i;
for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
if (!mc_algs[i].mc)
continue;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);
break;
}
}
}
static int meson_crypto_probe(struct platform_device *pdev)
{
struct meson_dev *mc;
int err, i;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
mc->dev = &pdev->dev;
platform_set_drvdata(pdev, mc);
mc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mc->base)) {
err = PTR_ERR(mc->base);
dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
return err;
}
mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
if (IS_ERR(mc->busclk)) {
err = PTR_ERR(mc->busclk);
dev_err(&pdev->dev, "Cannot get core clock err=%d\n", err);
return err;
}
for (i = 0; i < MAXFLOW; i++) {
mc->irqs[i] = platform_get_irq(pdev, i);
if (mc->irqs[i] < 0)
return mc->irqs[i];
err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
"gxl-crypto", mc);
if (err < 0) {
dev_err(mc->dev, "Cannot request IRQ for flow %d\n", i);
return err;
}
}
err = clk_prepare_enable(mc->busclk);
if (err != 0) {
dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
return err;
}
err = meson_allocate_chanlist(mc);
if (err)
goto error_flow;
err = meson_register_algs(mc);
if (err)
goto error_alg;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) {
struct dentry *dbgfs_dir;
dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc->dbgfs_dir = dbgfs_dir;
#endif
}
return 0;
error_alg:
meson_unregister_algs(mc);
error_flow:
meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return err;
}
static int meson_crypto_remove(struct platform_device *pdev)
{
struct meson_dev *mc = platform_get_drvdata(pdev);
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
debugfs_remove_recursive(mc->dbgfs_dir);
#endif
meson_unregister_algs(mc);
meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return 0;
}
static const struct of_device_id meson_crypto_of_match_table[] = {
{ .compatible = "amlogic,gxl-crypto", },
{}
};
MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
static struct platform_driver meson_crypto_driver = {
.probe = meson_crypto_probe,
.remove = meson_crypto_remove,
.driver = {
.name = "gxl-crypto",
.of_match_table = meson_crypto_of_match_table,
},
};
module_platform_driver(meson_crypto_driver);
MODULE_DESCRIPTION("Amlogic GXL cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
| linux-master | drivers/crypto/amlogic/amlogic-gxl-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
*
* Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
*
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * key sizes in CBC and ECB mode.
*/
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"
static int get_engine_number(struct meson_dev *mc)
{
return atomic_inc_return(&mc->flow) % MAXFLOW;
}
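/*
 * Requests the descriptor engine cannot express directly are handed to
 * the software fallback: source and destination must have the same
 * scatterlist geometry, every segment must be a 16-byte multiple with a
 * 32-bit aligned offset, and the chain (plus the three key/IV
 * descriptors) must fit in MAXDESC entries.
 */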
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
struct scatterlist *src_sg = areq->src;
struct scatterlist *dst_sg = areq->dst;
if (areq->cryptlen == 0)
return true;
if (sg_nents(src_sg) != sg_nents(dst_sg))
return true;
/* KEY/IV descriptors use 3 desc */
if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
return true;
while (src_sg && dst_sg) {
if ((src_sg->length % 16) != 0)
return true;
if ((dst_sg->length % 16) != 0)
return true;
if (src_sg->length != dst_sg->length)
return true;
if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
return true;
if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
return true;
src_sg = sg_next(src_sg);
dst_sg = sg_next(dst_sg);
}
return false;
}
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
algt->stat_fb++;
#endif
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
areq->base.complete, areq->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (rctx->op_dir == MESON_DECRYPT)
err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
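/*
 * meson_cipher() below builds one descriptor chain per request, roughly:
 *
 *	[0..1]  key material (MODE_KEY, 16 bytes per descriptor)
 *	[2]     IV (also pushed as MODE_KEY, CBC only)
 *	[3..N]  one descriptor per source/destination segment
 *
 * As assembled in this function, the status word is
 *	(keymode << 20) | (blockmode << 26) | DESC_OWN | length
 * plus DESC_ENCRYPTION for encryption and DESC_LAST on the final entry;
 * field widths beyond what is visible here are not spelled out in this
 * file.
 */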
static int meson_cipher(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct meson_dev *mc = op->mc;
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
int flow = rctx->flow;
unsigned int todo, eat, len;
struct scatterlist *src_sg = areq->src;
struct scatterlist *dst_sg = areq->dst;
struct meson_desc *desc;
int nr_sgs, nr_sgd;
int i, err = 0;
unsigned int keyivlen, ivsize, offset, tloffset;
dma_addr_t phykeyiv;
void *backup_iv = NULL, *bkeyiv;
u32 v;
algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
areq->cryptlen,
rctx->op_dir, crypto_skcipher_ivsize(tfm),
op->keylen, flow);
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
algt->stat_req++;
mc->chanlist[flow].stat_req++;
#endif
/*
 * The hardware expects a list of meson_desc structures.
 * The first two descriptors store the key,
 * the third one stores the IV.
*/
bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
if (!bkeyiv)
return -ENOMEM;
memcpy(bkeyiv, op->key, op->keylen);
keyivlen = op->keylen;
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && ivsize > 0) {
if (ivsize > areq->cryptlen) {
dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
err = -EINVAL;
goto theend;
}
memcpy(bkeyiv + 32, areq->iv, ivsize);
keyivlen = 48;
if (rctx->op_dir == MESON_DECRYPT) {
backup_iv = kzalloc(ivsize, GFP_KERNEL);
if (!backup_iv) {
err = -ENOMEM;
goto theend;
}
offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(backup_iv, areq->src, offset,
ivsize, 0);
}
}
if (keyivlen == 24)
keyivlen = 32;
phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
DMA_TO_DEVICE);
err = dma_mapping_error(mc->dev, phykeyiv);
if (err) {
dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
goto theend;
}
tloffset = 0;
eat = 0;
i = 0;
while (keyivlen > eat) {
desc = &mc->chanlist[flow].tl[tloffset];
memset(desc, 0, sizeof(struct meson_desc));
todo = min(keyivlen - eat, 16u);
desc->t_src = cpu_to_le32(phykeyiv + i * 16);
desc->t_dst = cpu_to_le32(i * 16);
v = (MODE_KEY << 20) | DESC_OWN | 16;
desc->t_status = cpu_to_le32(v);
eat += todo;
i++;
tloffset++;
}
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
if (!nr_sgs) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = nr_sgs;
} else {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (!nr_sgs || nr_sgs > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
if (!nr_sgd || nr_sgd > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
err = -EINVAL;
goto theend;
}
}
src_sg = areq->src;
dst_sg = areq->dst;
len = areq->cryptlen;
while (src_sg) {
desc = &mc->chanlist[flow].tl[tloffset];
memset(desc, 0, sizeof(struct meson_desc));
desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
todo = min(len, sg_dma_len(src_sg));
v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
if (rctx->op_dir)
v |= DESC_ENCRYPTION;
len -= todo;
if (!sg_next(src_sg))
v |= DESC_LAST;
desc->t_status = cpu_to_le32(v);
tloffset++;
src_sg = sg_next(src_sg);
dst_sg = sg_next(dst_sg);
}
reinit_completion(&mc->chanlist[flow].complete);
mc->chanlist[flow].status = 0;
writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
msecs_to_jiffies(500));
if (mc->chanlist[flow].status == 0) {
dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
err = -EINVAL;
}
dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
if (areq->src == areq->dst) {
dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
}
if (areq->iv && ivsize > 0) {
if (rctx->op_dir == MESON_DECRYPT) {
memcpy(areq->iv, backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst,
areq->cryptlen - ivsize,
ivsize, 0);
}
}
theend:
kfree_sensitive(bkeyiv);
kfree_sensitive(backup_iv);
return err;
}
int meson_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = meson_cipher(breq);
local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
local_bh_enable();
return 0;
}
int meson_skdecrypt(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct crypto_engine *engine;
int e;
rctx->op_dir = MESON_DECRYPT;
if (meson_cipher_need_fallback(areq))
return meson_cipher_do_fallback(areq);
e = get_engine_number(op->mc);
engine = op->mc->chanlist[e].engine;
rctx->flow = e;
return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
int meson_skencrypt(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct crypto_engine *engine;
int e;
rctx->op_dir = MESON_ENCRYPT;
if (meson_cipher_need_fallback(areq))
return meson_cipher_do_fallback(areq);
e = get_engine_number(op->mc);
engine = op->mc->chanlist[e].engine;
rctx->flow = e;
return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
int meson_cipher_init(struct crypto_tfm *tfm)
{
struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct meson_alg_template *algt;
const char *name = crypto_tfm_alg_name(tfm);
struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
op->mc = algt->mc;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
return 0;
}
void meson_cipher_exit(struct crypto_tfm *tfm)
{
struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
}
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct meson_dev *mc = op->mc;
switch (keylen) {
case 128 / 8:
op->keymode = MODE_AES_128;
break;
case 192 / 8:
op->keymode = MODE_AES_192;
break;
case 256 / 8:
op->keymode = MODE_AES_256;
break;
default:
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
return -ENOMEM;
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
| linux-master | drivers/crypto/amlogic/amlogic-gxl-cipher.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
*
* Authors: zhenwei pi <pizhenwei@bytedance.com>
* lei he <helei.sig11@bytedance.com>
*
* Copyright 2022 Bytedance CO., LTD.
*/
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_rsa_ctx {
MPI n;
};
struct virtio_crypto_akcipher_ctx {
struct virtio_crypto *vcrypto;
struct crypto_akcipher *tfm;
bool session_valid;
__u64 session_id;
union {
struct virtio_crypto_rsa_ctx rsa_ctx;
};
};
struct virtio_crypto_akcipher_request {
struct virtio_crypto_request base;
struct virtio_crypto_akcipher_ctx *akcipher_ctx;
struct akcipher_request *akcipher_req;
void *src_buf;
void *dst_buf;
uint32_t opcode;
};
struct virtio_crypto_akcipher_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
struct akcipher_engine_alg algo;
};
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
kfree(vc_akcipher_req->src_buf);
kfree(vc_akcipher_req->dst_buf);
vc_akcipher_req->src_buf = NULL;
vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}
static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
struct virtio_crypto_akcipher_request *vc_akcipher_req =
container_of(vc_req, struct virtio_crypto_akcipher_request, base);
struct akcipher_request *akcipher_req;
int error;
switch (vc_req->status) {
case VIRTIO_CRYPTO_OK:
error = 0;
break;
case VIRTIO_CRYPTO_INVSESS:
case VIRTIO_CRYPTO_ERR:
error = -EINVAL;
break;
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
case VIRTIO_CRYPTO_KEY_REJECTED:
error = -EKEYREJECTED;
break;
default:
error = -EIO;
break;
}
akcipher_req = vc_akcipher_req->akcipher_req;
if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
/* the actual length may be less than the dst buffer size */
akcipher_req->dst_len = len - sizeof(vc_req->status);
sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
vc_akcipher_req->dst_buf, akcipher_req->dst_len);
}
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
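/*
 * Session creation posts three scatterlist entries on the control queue:
 * the ctrl header plus session parameters (out), the raw key (out) and
 * the virtio_crypto_session_input (in) through which the device returns
 * the status and session id.
 */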
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
struct virtio_crypto_ctrl_header *header, void *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
struct virtio_crypto *vcrypto = ctx->vcrypto;
uint8_t *pkey;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_session_input *input;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
pkey = kmemdup(key, keylen, GFP_KERNEL);
if (!pkey)
return -ENOMEM;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req) {
err = -ENOMEM;
goto out;
}
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
memcpy(&ctrl->u, para, sizeof(ctrl->u));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
sg_init_one(&key_sg, pkey, keylen);
sgs[num_out++] = &key_sg;
sg_init_one(&inhdr_sg, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr_sg;
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}
ctx->session_id = le64_to_cpu(input->session_id);
ctx->session_valid = true;
err = 0;
out:
kfree(vc_ctrl_req);
kfree_sensitive(pkey);
return err;
}
static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
unsigned int num_out = 0, num_in = 0;
int err;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_inhdr *ctrl_status;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
if (!ctx->session_valid)
return 0;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req)
return -ENOMEM;
ctrl_status = &vc_ctrl_req->ctrl_status;
ctrl_status->status = VIRTIO_CRYPTO_ERR;
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
ctrl->header.queue_id = 0;
destroy_session = &ctrl->u.destroy_session;
destroy_session->session_id = cpu_to_le64(ctx->session_id);
sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &inhdr_sg;
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
ctrl_status->status, destroy_session->session_id);
err = -EINVAL;
goto out;
}
err = 0;
ctx->session_valid = false;
out:
kfree(vc_ctrl_req);
return err;
}
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, struct data_queue *data_vq)
{
struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
void *src_buf = NULL, *dst_buf = NULL;
unsigned int num_out = 0, num_in = 0;
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
int ret = -ENOMEM;
bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
/* out header */
sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr_sg;
/* src data */
src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
if (!src_buf)
goto err;
if (verify) {
/* for verify operation, both src and dst data work as OUT direction */
sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
sg_init_one(&srcdata_sg, src_buf, src_len);
sgs[num_out++] = &srcdata_sg;
} else {
sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
sg_init_one(&srcdata_sg, src_buf, src_len);
sgs[num_out++] = &srcdata_sg;
/* dst data */
dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
if (!dst_buf)
goto err;
sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
sgs[num_out + num_in++] = &dstdata_sg;
}
vc_akcipher_req->src_buf = src_buf;
vc_akcipher_req->dst_buf = dst_buf;
/* in header */
sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
sgs[num_out + num_in++] = &inhdr_sg;
spin_lock_irqsave(&data_vq->lock, flags);
ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
virtqueue_kick(data_vq->vq);
spin_unlock_irqrestore(&data_vq->lock, flags);
if (ret)
goto err;
return 0;
err:
kfree(src_buf);
kfree(dst_buf);
return -ENOMEM;
}
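/*
 * Crypto engine callback: fill in the per-request header and akcipher
 * parameters, then queue the buffers on the data virtqueue. Completion
 * is reported asynchronously via virtio_crypto_dataq_akcipher_callback().
 */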
static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct data_queue *data_vq = vc_req->dataq;
struct virtio_crypto_op_header *header;
struct virtio_crypto_akcipher_data_req *akcipher_req;
int ret;
vc_req->sgs = NULL;
vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
if (!vc_req->req_data)
return -ENOMEM;
/* build request header */
header = &vc_req->req_data->header;
header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
header->session_id = cpu_to_le64(ctx->session_id);
/* build request akcipher data */
akcipher_req = &vc_req->req_data->u.akcipher_req;
akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
if (ret < 0) {
kfree_sensitive(vc_req->req_data);
vc_req->req_data = NULL;
return ret;
}
return 0;
}
static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
vc_akcipher_req->akcipher_ctx = ctx;
vc_akcipher_req->akcipher_req = req;
vc_akcipher_req->opcode = opcode;
return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}
static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}
static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}
static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen,
bool private,
int padding_algo,
int hash_algo)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
struct virtio_crypto *vcrypto;
struct virtio_crypto_ctrl_header header;
struct virtio_crypto_akcipher_session_para para;
struct rsa_key rsa_key = {0};
int node = virtio_crypto_get_current_node();
uint32_t keytype;
int ret;
/* mpi_free() accepts a NULL pointer, so just free it unconditionally. */
mpi_free(rsa_ctx->n);
rsa_ctx->n = NULL;
if (private) {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
ret = rsa_parse_priv_key(&rsa_key, key, keylen);
} else {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
ret = rsa_parse_pub_key(&rsa_key, key, keylen);
}
if (ret)
return ret;
rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
if (!rsa_ctx->n)
return -ENOMEM;
if (!ctx->vcrypto) {
vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
VIRTIO_CRYPTO_AKCIPHER_RSA);
if (!vcrypto) {
pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
ctx->vcrypto = vcrypto;
} else {
virtio_crypto_alg_akcipher_close_session(ctx);
}
/* set ctrl header */
header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
header.queue_id = 0;
/* set RSA para */
para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
para.keytype = cpu_to_le32(keytype);
para.keylen = cpu_to_le32(keylen);
para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
para.u.rsa.hash_algo = cpu_to_le32(hash_algo);
return virtio_crypto_alg_akcipher_init_session(ctx, &header, ¶, key, keylen);
}
static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}
static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}
static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}
static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}
static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
return mpi_get_size(rsa_ctx->n);
}
static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
ctx->tfm = tfm;
akcipher_set_reqsize(tfm,
sizeof(struct virtio_crypto_akcipher_request));
return 0;
}
static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
virtio_crypto_alg_akcipher_close_session(ctx);
virtcrypto_dev_put(ctx->vcrypto);
mpi_free(rsa_ctx->n);
rsa_ctx->n = NULL;
}
static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
.algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
.set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.base = {
.cra_name = "rsa",
.cra_driver_name = "virtio-crypto-rsa",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
.algo.op = {
.do_one_request = virtio_crypto_rsa_do_req,
},
},
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
.algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.sign = virtio_crypto_rsa_sign,
.verify = virtio_crypto_rsa_verify,
.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.base = {
.cra_name = "pkcs1pad(rsa,sha1)",
.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
.algo.op = {
.do_one_request = virtio_crypto_rsa_do_req,
},
},
};
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
ret = crypto_engine_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_akcipher_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
virtio_crypto_akcipher_algs[i].algo.base.base.cra_name);
}
unlock:
mutex_unlock(&algs_lock);
return ret;
}
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 1)
crypto_engine_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
virtio_crypto_akcipher_algs[i].active_devs--;
}
mutex_unlock(&algs_lock);
}
| linux-master | drivers/crypto/virtio/virtio_crypto_akcipher_algs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
*
* Authors: Gonglei <arei.gonglei@huawei.com>
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*/
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_skcipher_ctx {
struct virtio_crypto *vcrypto;
struct crypto_skcipher *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_sym_request {
struct virtio_crypto_request base;
/* Cipher or aead */
uint32_t type;
struct virtio_crypto_skcipher_ctx *skcipher_ctx;
struct skcipher_request *skcipher_req;
uint8_t *iv;
/* Encryption? */
bool encrypt;
};
struct virtio_crypto_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
struct skcipher_engine_alg algo;
};
/*
 * The algs_lock protects the per-algorithm active_devs counters below
 * and crypto algorithm registration/unregistration.
*/
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct skcipher_request *req,
int err);
static void virtio_crypto_dataq_sym_callback
(struct virtio_crypto_request *vc_req, int len)
{
struct virtio_crypto_sym_request *vc_sym_req =
container_of(vc_req, struct virtio_crypto_sym_request, base);
struct skcipher_request *ablk_req;
int error;
/* Finish the encrypt or decrypt process */
if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
switch (vc_req->status) {
case VIRTIO_CRYPTO_OK:
error = 0;
break;
case VIRTIO_CRYPTO_INVSESS:
case VIRTIO_CRYPTO_ERR:
error = -EINVAL;
break;
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
default:
error = -EIO;
break;
}
ablk_req = vc_sym_req->skcipher_req;
virtio_crypto_skcipher_finalize_req(vc_sym_req,
ablk_req, error);
}
}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
u64 total = 0;
for (total = 0; sg; sg = sg_next(sg))
total += sg->length;
return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
switch (key_len) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_192:
case AES_KEYSIZE_256:
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
break;
default:
return -EINVAL;
}
return 0;
}
static int virtio_crypto_alg_skcipher_init_session(
struct virtio_crypto_skcipher_ctx *ctx,
uint32_t alg, const uint8_t *key,
unsigned int keylen,
int encrypt)
{
struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
struct virtio_crypto *vcrypto = ctx->vcrypto;
int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_session_input *input;
struct virtio_crypto_sym_create_session_req *sym_create_session;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
/*
 * Avoid DMA from the stack: use a dynamically-allocated
 * buffer for the key.
*/
uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);
if (!cipher_key)
return -ENOMEM;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req) {
err = -ENOMEM;
goto out;
}
/* Fill in the ctrl header */
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
ctrl->header.algo = cpu_to_le32(alg);
/* Set the default dataqueue id to 0 */
ctrl->header.queue_id = 0;
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
/* Fill in the cipher parameters */
sym_create_session = &ctrl->u.sym_create_session;
sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
sym_create_session->u.cipher.para.algo = ctrl->header.algo;
sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
sym_create_session->u.cipher.para.op = cpu_to_le32(op);
sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;
/* Set key */
sg_init_one(&key_sg, cipher_key, keylen);
sgs[num_out++] = &key_sg;
/* Return status and session id back */
sg_init_one(&inhdr, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr;
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}
if (encrypt)
ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
else
ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);
err = 0;
out:
kfree(vc_ctrl_req);
kfree_sensitive(cipher_key);
return err;
}
static int virtio_crypto_alg_skcipher_close_session(
struct virtio_crypto_skcipher_ctx *ctx,
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_inhdr *ctrl_status;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req)
return -ENOMEM;
ctrl_status = &vc_ctrl_req->ctrl_status;
ctrl_status->status = VIRTIO_CRYPTO_ERR;
/* Fill in the ctrl header */
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
/* Set the default virtqueue id to 0 */
ctrl->header.queue_id = 0;
destroy_session = &ctrl->u.destroy_session;
if (encrypt)
destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
else
destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;
/* Return status and session id back */
sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &status_sg;
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
ctrl_status->status, destroy_session->session_id);
err = -EINVAL;
goto out;
}
err = 0;
out:
kfree(vc_ctrl_req);
return err;
}
static int virtio_crypto_alg_skcipher_init_sessions(
struct virtio_crypto_skcipher_ctx *ctx,
const uint8_t *key, unsigned int keylen)
{
uint32_t alg;
int ret;
struct virtio_crypto *vcrypto = ctx->vcrypto;
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
return -EINVAL;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
return -EINVAL;
/* Create encryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 1);
if (ret)
return ret;
/* Create decryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 0);
if (ret) {
virtio_crypto_alg_skcipher_close_session(ctx, 1);
return ret;
}
return 0;
}
/* Note: kernel crypto API implementation */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
const uint8_t *key,
unsigned int keylen)
{
struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
uint32_t alg;
int ret;
ret = virtio_crypto_alg_validate_key(keylen, &alg);
if (ret)
return ret;
if (!ctx->vcrypto) {
/* New key */
int node = virtio_crypto_get_current_node();
struct virtio_crypto *vcrypto =
virtcrypto_get_dev_node(node,
VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
if (!vcrypto) {
pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
ctx->vcrypto = vcrypto;
} else {
/* Rekeying, we should close the created sessions previously */
virtio_crypto_alg_skcipher_close_session(ctx, 1);
virtio_crypto_alg_skcipher_close_session(ctx, 0);
}
ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
if (ret) {
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
return ret;
}
return 0;
}
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct skcipher_request *req,
struct data_queue *data_vq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
struct scatterlist *sg;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (src_nents < 0) {
pr_err("Invalid number of src SG.\n");
return src_nents;
}
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
src_nents, dst_nents);
/* Why 3? outhdr + iv + inhdr */
sg_total = src_nents + dst_nents + 3;
sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!sgs)
return -ENOMEM;
req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!req_data) {
kfree(sgs);
return -ENOMEM;
}
vc_req->req_data = req_data;
vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
/* Head of operation */
if (vc_sym_req->encrypt) {
req_data->header.session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
req_data->header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
} else {
req_data->header.session_id =
cpu_to_le64(ctx->dec_sess_info.session_id);
req_data->header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
}
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
req_data->u.sym_req.u.cipher.para.src_data_len =
cpu_to_le32(req->cryptlen);
dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
if (unlikely(dst_len > U32_MAX)) {
pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
err = -EINVAL;
goto free;
}
dst_len = min_t(unsigned int, req->cryptlen, dst_len);
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->cryptlen, dst_len);
if (unlikely(req->cryptlen + dst_len + ivsize +
sizeof(vc_req->status) > vcrypto->max_size)) {
pr_err("virtio_crypto: The length is too big\n");
err = -EINVAL;
goto free;
}
req_data->u.sym_req.u.cipher.para.dst_data_len =
cpu_to_le32((uint32_t)dst_len);
/* Outhdr */
sg_init_one(&outhdr, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr;
/* IV */
/*
 * Avoid DMA from the stack: use a dynamically-allocated
 * buffer for the IV.
*/
iv = kzalloc_node(ivsize, GFP_ATOMIC,
dev_to_node(&vcrypto->vdev->dev));
if (!iv) {
err = -ENOMEM;
goto free;
}
memcpy(iv, req->iv, ivsize);
if (!vc_sym_req->encrypt)
scatterwalk_map_and_copy(req->iv, req->src,
req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
vc_sym_req->iv = iv;
/* Source data */
for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
sgs[num_out++] = sg;
/* Destination data */
for (sg = req->dst; sg; sg = sg_next(sg))
sgs[num_out + num_in++] = sg;
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
sgs[num_out + num_in++] = &status_sg;
vc_req->sgs = sgs;
spin_lock_irqsave(&data_vq->lock, flags);
err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
num_in, vc_req, GFP_ATOMIC);
virtqueue_kick(data_vq->vq);
spin_unlock_irqrestore(&data_vq->lock, flags);
if (unlikely(err < 0))
goto free_iv;
return 0;
free_iv:
kfree_sensitive(iv);
free:
kfree_sensitive(req_data);
kfree(sgs);
return err;
}
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
if (!req->cryptlen)
return 0;
if (req->cryptlen % AES_BLOCK_SIZE)
return -EINVAL;
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
vc_sym_req->skcipher_ctx = ctx;
vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = true;
return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
if (!req->cryptlen)
return 0;
if (req->cryptlen % AES_BLOCK_SIZE)
return -EINVAL;
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
vc_sym_req->skcipher_ctx = ctx;
vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = false;
return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
ctx->tfm = tfm;
return 0;
}
static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (!ctx->vcrypto)
return;
virtio_crypto_alg_skcipher_close_session(ctx, 1);
virtio_crypto_alg_skcipher_close_session(ctx, 0);
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
}
int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq)
{
struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
struct virtio_crypto_sym_request *vc_sym_req =
skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
virtqueue_kick(data_vq->vq);
return 0;
}
static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct skcipher_request *req,
int err)
{
if (vc_sym_req->encrypt)
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
kfree_sensitive(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
.algo.base = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "virtio_crypto_aes_cbc",
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
.base.cra_module = THIS_MODULE,
.init = virtio_crypto_skcipher_init,
.exit = virtio_crypto_skcipher_exit,
.setkey = virtio_crypto_skcipher_setkey,
.decrypt = virtio_crypto_skcipher_decrypt,
.encrypt = virtio_crypto_skcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.algo.op = {
.do_one_request = virtio_crypto_skcipher_crypt_req,
},
} };
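/*
 * active_devs counts how many virtio crypto devices currently back each
 * algorithm: registration with the crypto API happens on the first such
 * device and unregistration on the last one, both under algs_lock.
 */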
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
uint32_t service = virtio_crypto_algs[i].service;
uint32_t algonum = virtio_crypto_algs[i].algonum;
if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_algs[i].active_devs == 0) {
ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
virtio_crypto_algs[i].algo.base.base.cra_name);
}
unlock:
mutex_unlock(&algs_lock);
return ret;
}
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
uint32_t service = virtio_crypto_algs[i].service;
uint32_t algonum = virtio_crypto_algs[i].algonum;
if (virtio_crypto_algs[i].active_devs == 0 ||
!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_algs[i].active_devs == 1)
crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);
virtio_crypto_algs[i].active_devs--;
}
mutex_unlock(&algs_lock);
}
| linux-master | drivers/crypto/virtio/virtio_crypto_skcipher_algs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
if (vc_req) {
kfree_sensitive(vc_req->req_data);
kfree(vc_req->sgs);
}
}
static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
complete(&vc_ctrl_req->compl);
}
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
unsigned long flags;
unsigned int len;
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
virtio_crypto_ctrlq_callback(vc_ctrl_req);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
}
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}
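/*
 * Synchronous control request helper: add the buffers under ctrl_lock,
 * kick the control queue and sleep until virtcrypto_ctrlq_callback()
 * completes vc_ctrl_req->compl.
 */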
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
unsigned int out_sgs, unsigned int in_sgs,
struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
int err;
unsigned long flags;
init_completion(&vc_ctrl_req->compl);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
if (err < 0) {
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
wait_for_completion(&vc_ctrl_req->compl);
return 0;
}
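/*
 * Data queue completions land here in virtqueue interrupt context; each
 * finished request is dispatched to its algorithm-specific callback
 * (vc_req->alg_cb) with the queue lock dropped.
 */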
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct virtio_crypto_request *vc_req;
unsigned long flags;
unsigned int len;
unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(
&vcrypto->data_vq[qid].lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
spin_lock_irqsave(
&vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
vq_callback_t **callbacks;
struct virtqueue **vqs;
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
struct device *dev = &vi->vdev->dev;
/*
 * We expect 1 data virtqueue, possibly followed by N-1 further data
 * queues used in multiqueue mode, with the control vq last.
*/
total_vqs = vi->max_data_queues + 1;
/* Allocate space for find_vqs parameters */
vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_vq;
callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
if (!callbacks)
goto err_callback;
names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
if (!names)
goto err_names;
/* Parameters for control virtqueue */
callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
names[total_vqs - 1] = "controlq";
/* Allocate/initialize parameters for data virtqueues */
for (i = 0; i < vi->max_data_queues; i++) {
callbacks[i] = virtcrypto_dataq_callback;
snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
"dataq.%d", i);
names[i] = vi->data_vq[i].name;
}
ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
if (ret)
goto err_find;
vi->ctrl_vq = vqs[total_vqs - 1];
for (i = 0; i < vi->max_data_queues; i++) {
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
goto err_engine;
}
}
kfree(names);
kfree(callbacks);
kfree(vqs);
return 0;
err_engine:
err_find:
kfree(names);
err_names:
kfree(callbacks);
err_callback:
kfree(vqs);
err_vq:
return ret;
}
static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
GFP_KERNEL);
if (!vi->data_vq)
return -ENOMEM;
return 0;
}
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
int i;
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_data_queues; i++)
virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
vi->affinity_hint_set = false;
}
}
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
int i = 0;
int cpu;
/*
* In single queue mode, we don't set the cpu affinity.
*/
if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
virtcrypto_clean_affinity(vcrypto, -1);
return;
}
/*
 * In multiqueue mode, we let each queue be private to one CPU
 * by setting the affinity hint, to eliminate contention.
 *
 * TODO: add CPU hotplug support by registering a CPU notifier.
*
*/
for_each_online_cpu(cpu) {
virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
if (++i >= vcrypto->max_data_queues)
break;
}
vcrypto->affinity_hint_set = true;
}
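/*
 * Worked example (illustrative, not in the original file): with eight online
 * CPUs and max_data_queues == 4, the loop above pins data_vq[0]..data_vq[3]
 * to the first four online CPUs and then stops; the remaining CPUs simply
 * get no dedicated data queue.
 */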
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
kfree(vi->data_vq);
}
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
int ret;
/* Allocate send & receive queues */
ret = virtcrypto_alloc_queues(vi);
if (ret)
goto err;
ret = virtcrypto_find_vqs(vi);
if (ret)
goto err_free;
cpus_read_lock();
virtcrypto_set_affinity(vi);
cpus_read_unlock();
return 0;
err_free:
virtcrypto_free_queues(vi);
err:
return ret;
}
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
u32 status;
int err;
virtio_cread_le(vcrypto->vdev,
struct virtio_crypto_config, status, &status);
/*
* Unknown status bits would be a host error and the driver
* should consider the device to be broken.
*/
if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
dev_warn(&vcrypto->vdev->dev,
"Unknown status bits: 0x%x\n", status);
virtio_break_device(vcrypto->vdev);
return -EPERM;
}
if (vcrypto->status == status)
return 0;
vcrypto->status = status;
if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
err = virtcrypto_dev_start(vcrypto);
if (err) {
dev_err(&vcrypto->vdev->dev,
"Failed to start virtio crypto device.\n");
return -EPERM;
}
dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
} else {
virtcrypto_dev_stop(vcrypto);
dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
}
return 0;
}
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
int32_t i;
int ret;
for (i = 0; i < vcrypto->max_data_queues; i++) {
if (vcrypto->data_vq[i].engine) {
ret = crypto_engine_start(vcrypto->data_vq[i].engine);
if (ret)
goto err;
}
}
return 0;
err:
while (--i >= 0)
if (vcrypto->data_vq[i].engine)
crypto_engine_exit(vcrypto->data_vq[i].engine);
return ret;
}
static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
u32 i;
for (i = 0; i < vcrypto->max_data_queues; i++)
if (vcrypto->data_vq[i].engine)
crypto_engine_exit(vcrypto->data_vq[i].engine);
}
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
struct virtio_device *vdev = vcrypto->vdev;
virtcrypto_clean_affinity(vcrypto, -1);
vdev->config->del_vqs(vdev);
virtcrypto_free_queues(vcrypto);
}
static int virtcrypto_probe(struct virtio_device *vdev)
{
int err = -EFAULT;
struct virtio_crypto *vcrypto;
u32 max_data_queues = 0, max_cipher_key_len = 0;
u32 max_auth_key_len = 0;
u64 max_size = 0;
u32 cipher_algo_l = 0;
u32 cipher_algo_h = 0;
u32 hash_algo = 0;
u32 mac_algo_l = 0;
u32 mac_algo_h = 0;
u32 aead_algo = 0;
u32 akcipher_algo = 0;
u32 crypto_services = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
/*
* If the accelerator is connected to a node with no memory
* there is no point in using the accelerator since the remote
* memory transaction will be very slow.
*/
dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
dev_to_node(&vdev->dev));
if (!vcrypto)
return -ENOMEM;
virtio_cread_le(vdev, struct virtio_crypto_config,
max_dataqueues, &max_data_queues);
if (max_data_queues < 1)
max_data_queues = 1;
virtio_cread_le(vdev, struct virtio_crypto_config,
max_cipher_key_len, &max_cipher_key_len);
virtio_cread_le(vdev, struct virtio_crypto_config,
max_auth_key_len, &max_auth_key_len);
virtio_cread_le(vdev, struct virtio_crypto_config,
max_size, &max_size);
virtio_cread_le(vdev, struct virtio_crypto_config,
crypto_services, &crypto_services);
virtio_cread_le(vdev, struct virtio_crypto_config,
cipher_algo_l, &cipher_algo_l);
virtio_cread_le(vdev, struct virtio_crypto_config,
cipher_algo_h, &cipher_algo_h);
virtio_cread_le(vdev, struct virtio_crypto_config,
hash_algo, &hash_algo);
virtio_cread_le(vdev, struct virtio_crypto_config,
mac_algo_l, &mac_algo_l);
virtio_cread_le(vdev, struct virtio_crypto_config,
mac_algo_h, &mac_algo_h);
virtio_cread_le(vdev, struct virtio_crypto_config,
aead_algo, &aead_algo);
if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
virtio_cread_le(vdev, struct virtio_crypto_config,
akcipher_algo, &akcipher_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
if (err) {
dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
goto free;
}
vcrypto->owner = THIS_MODULE;
vdev->priv = vcrypto;
vcrypto->vdev = vdev;
spin_lock_init(&vcrypto->ctrl_lock);
/* Use single data queue as default */
vcrypto->curr_queue = 1;
vcrypto->max_data_queues = max_data_queues;
vcrypto->max_cipher_key_len = max_cipher_key_len;
vcrypto->max_auth_key_len = max_auth_key_len;
vcrypto->max_size = max_size;
vcrypto->crypto_services = crypto_services;
vcrypto->cipher_algo_l = cipher_algo_l;
vcrypto->cipher_algo_h = cipher_algo_h;
vcrypto->mac_algo_l = mac_algo_l;
vcrypto->mac_algo_h = mac_algo_h;
vcrypto->hash_algo = hash_algo;
vcrypto->aead_algo = aead_algo;
vcrypto->akcipher_algo = akcipher_algo;
dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
vcrypto->max_data_queues,
vcrypto->max_cipher_key_len,
vcrypto->max_auth_key_len,
vcrypto->max_size);
err = virtcrypto_init_vqs(vcrypto);
if (err) {
dev_err(&vdev->dev, "Failed to initialize vqs.\n");
goto free_dev;
}
err = virtcrypto_start_crypto_engines(vcrypto);
if (err)
goto free_vqs;
virtio_device_ready(vdev);
err = virtcrypto_update_status(vcrypto);
if (err)
goto free_engines;
return 0;
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
free_dev:
virtcrypto_devmgr_rm_dev(vcrypto);
free:
kfree(vcrypto);
return err;
}
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
struct virtio_crypto_request *vc_req;
int i;
struct virtqueue *vq;
for (i = 0; i < vcrypto->max_data_queues; i++) {
vq = vcrypto->data_vq[i].vq;
while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
kfree(vc_req->req_data);
kfree(vc_req->sgs);
}
cond_resched();
}
}
static void virtcrypto_remove(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
virtcrypto_del_vqs(vcrypto);
virtcrypto_devmgr_rm_dev(vcrypto);
kfree(vcrypto);
}
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
virtcrypto_update_status(vcrypto);
}
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
virtcrypto_del_vqs(vcrypto);
return 0;
}
static int virtcrypto_restore(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
int err;
err = virtcrypto_init_vqs(vcrypto);
if (err)
return err;
err = virtcrypto_start_crypto_engines(vcrypto);
if (err)
goto free_vqs;
virtio_device_ready(vdev);
err = virtcrypto_dev_start(vcrypto);
if (err) {
dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
goto free_engines;
}
return 0;
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
return err;
}
#endif
static const unsigned int features[] = {
/* none */
};
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static struct virtio_driver virtio_crypto_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
.probe = virtcrypto_probe,
.remove = virtcrypto_remove,
.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
.freeze = virtcrypto_freeze,
.restore = virtcrypto_restore,
#endif
};
module_virtio_driver(virtio_crypto_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
| linux-master | drivers/crypto/virtio/virtio_crypto_core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*/
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/module.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
static LIST_HEAD(virtio_crypto_table);
static uint32_t num_devices;
/* The table_lock protects the above global list and num_devices */
static DEFINE_MUTEX(table_lock);
#define VIRTIO_CRYPTO_MAX_DEVICES 32
/*
* virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
* framework.
* @vcrypto_dev: Pointer to virtio crypto device.
*
* Function adds virtio crypto device to the global list.
* To be used by virtio crypto device specific drivers.
*
 * Return: 0 on success, error code otherwise.
*/
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
{
struct list_head *itr;
mutex_lock(&table_lock);
if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
pr_info("virtio_crypto: only support up to %d devices\n",
VIRTIO_CRYPTO_MAX_DEVICES);
mutex_unlock(&table_lock);
return -EFAULT;
}
list_for_each(itr, &virtio_crypto_table) {
struct virtio_crypto *ptr =
list_entry(itr, struct virtio_crypto, list);
if (ptr == vcrypto_dev) {
mutex_unlock(&table_lock);
return -EEXIST;
}
}
atomic_set(&vcrypto_dev->ref_count, 0);
list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
vcrypto_dev->dev_id = num_devices++;
mutex_unlock(&table_lock);
return 0;
}
struct list_head *virtcrypto_devmgr_get_head(void)
{
return &virtio_crypto_table;
}
/*
* virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
* framework.
* @vcrypto_dev: Pointer to virtio crypto device.
*
* Function removes virtio crypto device from the acceleration framework.
* To be used by virtio crypto device specific drivers.
*
* Return: void
*/
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
{
mutex_lock(&table_lock);
list_del(&vcrypto_dev->list);
num_devices--;
mutex_unlock(&table_lock);
}
/*
* virtcrypto_devmgr_get_first()
*
* Function returns the first virtio crypto device from the acceleration
* framework.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
struct virtio_crypto *virtcrypto_devmgr_get_first(void)
{
struct virtio_crypto *dev = NULL;
mutex_lock(&table_lock);
if (!list_empty(&virtio_crypto_table))
dev = list_first_entry(&virtio_crypto_table,
struct virtio_crypto,
list);
mutex_unlock(&table_lock);
return dev;
}
/*
* virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
* @vcrypto_dev: Pointer to virtio crypto device.
*
* To be used by virtio crypto device specific drivers.
*
* Return: 1 when device is in use, 0 otherwise.
*/
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
{
return atomic_read(&vcrypto_dev->ref_count) != 0;
}
/*
* virtcrypto_dev_get() - Increment vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
 * Increment the vcrypto_dev refcount and, if this takes it from zero
 * to one (the device goes from unused to in use), take a reference on
 * the owning module as well.
* To be used by virtio crypto device specific drivers.
*
 * Return: 0 when successful, -EFAULT when failing to bump the module refcount
*/
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
{
if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
if (!try_module_get(vcrypto_dev->owner))
return -EFAULT;
return 0;
}
/*
* virtcrypto_dev_put() - Decrement vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
 * Decrement the vcrypto_dev refcount and, if it drops back to zero
 * (the device is no longer in use), drop the module reference taken
 * in virtcrypto_dev_get().
* To be used by virtio crypto device specific drivers.
*
* Return: void
*/
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
{
if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
module_put(vcrypto_dev->owner);
}
/*
* virtcrypto_dev_started() - Check whether device has started
* @vcrypto_dev: Pointer to virtio crypto device.
*
* To be used by virtio crypto device specific drivers.
*
* Return: 1 when the device has started, 0 otherwise
*/
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
{
return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
}
/*
* virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
* @node: Node id the driver works.
* @service: Crypto service that needs to be supported by the
* dev
* @algo: The algorithm number that needs to be supported by the
* dev
*
 * Function returns the least-used virtio crypto device on the node
 * that supports the given crypto service and algorithm.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
uint32_t algo)
{
struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
unsigned long best = ~0;
unsigned long ctr;
mutex_lock(&table_lock);
list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
dev_to_node(&tmp_dev->vdev->dev) < 0) &&
virtcrypto_dev_started(tmp_dev) &&
virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
vcrypto_dev = tmp_dev;
best = ctr;
}
}
}
if (!vcrypto_dev) {
pr_info("virtio_crypto: Could not find a device on node %d\n",
node);
/* Get any started device */
list_for_each_entry(tmp_dev,
virtcrypto_devmgr_get_head(), list) {
if (virtcrypto_dev_started(tmp_dev) &&
virtcrypto_algo_is_supported(tmp_dev,
service, algo)) {
vcrypto_dev = tmp_dev;
break;
}
}
}
mutex_unlock(&table_lock);
if (!vcrypto_dev)
return NULL;
virtcrypto_dev_get(vcrypto_dev);
return vcrypto_dev;
}
/*
* virtcrypto_dev_start() - Start virtio crypto device
* @vcrypto: Pointer to virtio crypto device.
*
* Function notifies all the registered services that the virtio crypto device
* is ready to be used.
* To be used by virtio crypto device specific drivers.
*
 * Return: 0 on success, -EFAULT when failing to register algorithms
*/
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
if (virtio_crypto_skcipher_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
return -EFAULT;
}
if (virtio_crypto_akcipher_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
virtio_crypto_skcipher_algs_unregister(vcrypto);
return -EFAULT;
}
return 0;
}
/*
* virtcrypto_dev_stop() - Stop virtio crypto device
* @vcrypto: Pointer to virtio crypto device.
*
* Function notifies all the registered services that the virtio crypto device
 * can no longer be used, and unregisters its crypto algorithms.
* To be used by virtio crypto device specific drivers.
*
* Return: void
*/
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
virtio_crypto_skcipher_algs_unregister(vcrypto);
virtio_crypto_akcipher_algs_unregister(vcrypto);
}
/*
 * virtcrypto_algo_is_supported()
 * @vcrypto: Pointer to virtio crypto device.
 * @service: The bit number of the service to validate.
 *           See VIRTIO_CRYPTO_SERVICE_*
 * @algo: The bit number of the algorithm to validate.
 *
* Validate if the virtio crypto device supports a service and
* algo.
*
* Return true if device supports a service and algo.
*/
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
uint32_t service,
uint32_t algo)
{
uint32_t service_mask = 1u << service;
uint32_t algo_mask = 0;
bool low = true;
if (algo > 31) {
algo -= 32;
low = false;
}
if (!(vcrypto->crypto_services & service_mask))
return false;
switch (service) {
case VIRTIO_CRYPTO_SERVICE_CIPHER:
if (low)
algo_mask = vcrypto->cipher_algo_l;
else
algo_mask = vcrypto->cipher_algo_h;
break;
case VIRTIO_CRYPTO_SERVICE_HASH:
algo_mask = vcrypto->hash_algo;
break;
case VIRTIO_CRYPTO_SERVICE_MAC:
if (low)
algo_mask = vcrypto->mac_algo_l;
else
algo_mask = vcrypto->mac_algo_h;
break;
case VIRTIO_CRYPTO_SERVICE_AEAD:
algo_mask = vcrypto->aead_algo;
break;
case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
algo_mask = vcrypto->akcipher_algo;
break;
}
if (!(algo_mask & (1u << algo)))
return false;
return true;
}
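/*
 * Illustrative sketch, not part of the original file: how a transform
 * implementation might use the helpers above to pick a device offering
 * AES-CBC near a given NUMA node. VIRTIO_CRYPTO_SERVICE_CIPHER and
 * VIRTIO_CRYPTO_CIPHER_AES_CBC come from <uapi/linux/virtio_crypto.h>;
 * the example_* name is hypothetical.
 */
static inline struct virtio_crypto *example_get_aes_cbc_dev(int node)
{
	/*
	 * virtcrypto_get_dev_node() filters on virtcrypto_dev_started() and
	 * virtcrypto_algo_is_supported() and takes a reference; callers must
	 * pair it with virtcrypto_dev_put() when done with the device.
	 */
	return virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_CIPHER,
				       VIRTIO_CRYPTO_CIPHER_AES_CBC);
}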
| linux-master | drivers/crypto/virtio/virtio_crypto_mgr.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
#include "cn10k_cpt.h"
#include <rvu_reg.h>
#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
/* Clear interrupt if any */
otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
0x1ULL);
/* Enable PF-VF interrupt */
otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}
static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
/* Disable PF-VF interrupt */
otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);
/* Clear interrupt if any */
otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
0x1ULL);
}
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
int ret, irq;
int num_vec;
num_vec = pci_msix_vec_count(cptvf->pdev);
if (num_vec <= 0)
return -EINVAL;
/* Enable MSI-X */
ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&cptvf->pdev->dev,
"Request for %d msix vectors failed\n", num_vec);
return ret;
}
irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
/* Register VF<=>PF mailbox interrupt handler */
ret = devm_request_irq(&cptvf->pdev->dev, irq,
otx2_cptvf_pfvf_mbox_intr, 0,
"CPTPFVF Mbox", cptvf);
if (ret)
return ret;
/* Enable PF-VF mailbox interrupts */
cptvf_enable_pfvf_mbox_intrs(cptvf);
ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
if (ret) {
dev_warn(&cptvf->pdev->dev,
"PF not responding to mailbox, deferring probe\n");
cptvf_disable_pfvf_mbox_intrs(cptvf);
return -EPROBE_DEFER;
}
return 0;
}
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
resource_size_t offset, size;
int ret;
cptvf->pfvf_mbox_wq =
alloc_ordered_workqueue("cpt_pfvf_mailbox",
WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!cptvf->pfvf_mbox_wq)
return -ENOMEM;
if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
/* For cn10k platform, VF mailbox region is in its BAR2
* register space
*/
cptvf->pfvf_mbox_base = cptvf->reg_base +
CN10K_CPT_VF_MBOX_REGION;
} else {
offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
/* Map PF-VF mailbox memory */
cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
size);
if (!cptvf->pfvf_mbox_base) {
dev_err(&pdev->dev, "Unable to map BAR4\n");
ret = -ENOMEM;
goto free_wqe;
}
}
ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
if (ret)
goto free_wqe;
ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
if (ret)
goto destroy_mbox;
INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
return 0;
destroy_mbox:
otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
destroy_workqueue(cptvf->pfvf_mbox_wq);
return ret;
}
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
destroy_workqueue(cptvf->pfvf_mbox_wq);
otx2_mbox_destroy(&cptvf->pfvf_mbox);
}
static void cptlf_work_handler(unsigned long data)
{
otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}
static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
int i;
for (i = 0; i < lfs->lfs_num; i++) {
if (!lfs->lf[i].wqe)
continue;
tasklet_kill(&lfs->lf[i].wqe->work);
kfree(lfs->lf[i].wqe);
lfs->lf[i].wqe = NULL;
}
}
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
struct otx2_cptlf_wqe *wqe;
int i, ret = 0;
for (i = 0; i < lfs->lfs_num; i++) {
wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
if (!wqe) {
ret = -ENOMEM;
goto cleanup_tasklet;
}
tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
wqe->lfs = lfs;
wqe->lf_num = i;
lfs->lf[i].wqe = wqe;
}
return 0;
cleanup_tasklet:
cleanup_tasklet_work(lfs);
return ret;
}
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
int i;
for (i = 0; i < lfs->lfs_num; i++) {
kfree(lfs->lf[i].pqueue.head);
lfs->lf[i].pqueue.head = NULL;
}
}
static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
int size, ret, i;
if (!lfs->lfs_num)
return -EINVAL;
for (i = 0; i < lfs->lfs_num; i++) {
lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
size = lfs->lf[i].pqueue.qlen *
sizeof(struct otx2_cpt_pending_entry);
lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
if (!lfs->lf[i].pqueue.head) {
ret = -ENOMEM;
goto error;
}
/* Initialize spin lock */
spin_lock_init(&lfs->lf[i].pqueue.lock);
}
return 0;
error:
free_pending_queues(lfs);
return ret;
}
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
cleanup_tasklet_work(lfs);
free_pending_queues(lfs);
}
static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
int ret;
ret = alloc_pending_queues(lfs);
if (ret) {
dev_err(&lfs->pdev->dev,
"Allocating pending queues failed\n");
return ret;
}
ret = init_tasklet_work(lfs);
if (ret) {
dev_err(&lfs->pdev->dev,
"Tasklet work init failed\n");
goto pending_queues_free;
}
return 0;
pending_queues_free:
free_pending_queues(lfs);
return ret;
}
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
/* Remove interrupts affinity */
otx2_cptlf_free_irqs_affinity(lfs);
/* Disable instruction queue */
otx2_cptlf_disable_iqueues(lfs);
/* Unregister crypto algorithms */
otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
/* Unregister LFs interrupts */
otx2_cptlf_unregister_interrupts(lfs);
/* Cleanup LFs software side */
lf_sw_cleanup(lfs);
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
}
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
struct device *dev = &cptvf->pdev->dev;
int ret, lfs_num;
u8 eng_grp_msk;
/* Get engine group number for symmetric crypto */
cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
if (ret)
return ret;
if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
dev_err(dev, "Engine group for kernel crypto not available\n");
ret = -ENOENT;
return ret;
}
eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
if (ret)
return ret;
lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
num_online_cpus();
otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
&cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
return ret;
/* Get msix offsets for attached LFs */
ret = otx2_cpt_msix_offset_msg(lfs);
if (ret)
goto cleanup_lf;
/* Initialize LFs software side */
ret = lf_sw_init(lfs);
if (ret)
goto cleanup_lf;
/* Register LFs interrupts */
ret = otx2_cptlf_register_interrupts(lfs);
if (ret)
goto cleanup_lf_sw;
/* Set interrupts affinity */
ret = otx2_cptlf_set_irqs_affinity(lfs);
if (ret)
goto unregister_intr;
atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
/* Register crypto algorithms */
ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
if (ret) {
dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
goto disable_irqs;
}
return 0;
disable_irqs:
otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
lf_sw_cleanup(lfs);
cleanup_lf:
otx2_cptlf_shutdown(lfs);
return ret;
}
static int otx2_cptvf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct otx2_cptvf_dev *cptvf;
int ret;
cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
if (!cptvf)
return -ENOMEM;
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(dev, "Failed to enable PCI device\n");
goto clear_drvdata;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
/* Map VF's configuration registers */
ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
OTX2_CPTVF_DRV_NAME);
if (ret) {
dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
goto clear_drvdata;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, cptvf);
cptvf->pdev = pdev;
cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
ret = cn10k_cptvf_lmtst_init(cptvf);
if (ret)
goto clear_drvdata;
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
goto clear_drvdata;
/* Register interrupts */
ret = cptvf_register_interrupts(cptvf);
if (ret)
goto destroy_pfvf_mbox;
cptvf->blkaddr = BLKADDR_CPT0;
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
goto unregister_interrupts;
return 0;
unregister_interrupts:
cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
pci_set_drvdata(pdev, NULL);
return ret;
}
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
if (!cptvf) {
dev_err(&pdev->dev, "Invalid CPT VF device.\n");
return;
}
cptvf_lf_shutdown(&cptvf->lfs);
/* Disable PF-VF mailbox interrupt */
cptvf_disable_pfvf_mbox_intrs(cptvf);
/* Destroy PF-VF mbox */
cptvf_pfvf_mbox_destroy(cptvf);
pci_set_drvdata(pdev, NULL);
}
/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
{PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
{ 0, } /* end of table */
};
static struct pci_driver otx2_cptvf_pci_driver = {
.name = OTX2_CPTVF_DRV_NAME,
.id_table = otx2_cptvf_id_table,
.probe = otx2_cptvf_probe,
.remove = otx2_cptvf_remove,
};
module_pci_driver(otx2_cptvf_pci_driver);
MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include "otx2_cptvf.h"
#include "otx2_cpt_common.h"
/* SG list header size in bytes */
#define SG_LIST_HDR_SIZE 8
/* Default timeout when waiting for free pending entry in us */
#define CPT_PENTRY_TIMEOUT 1000
#define CPT_PENTRY_STEP 50
/* Default threshold for stopping and resuming sender requests */
#define CPT_IQ_STOP_MARGIN 128
#define CPT_IQ_RESUME_MARGIN 512
/* Default command timeout in seconds */
#define CPT_COMMAND_TIMEOUT 4
#define CPT_TIME_IN_RESET_COUNT 5
static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
struct otx2_cpt_req_info *req)
{
int i;
pr_debug("Gather list size %d\n", req->in_cnt);
for (i = 0; i < req->in_cnt; i++) {
pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
req->in[i].size, req->in[i].vptr,
(void *) req->in[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n",
req->in[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->in[i].vptr, req->in[i].size, false);
}
pr_debug("Scatter list size %d\n", req->out_cnt);
for (i = 0; i < req->out_cnt; i++) {
pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
req->out[i].size, req->out[i].vptr,
(void *) req->out[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->out[i].vptr, req->out[i].size, false);
}
}
static inline struct otx2_cpt_pending_entry *get_free_pending_entry(
struct otx2_cpt_pending_queue *q,
int qlen)
{
struct otx2_cpt_pending_entry *ent = NULL;
ent = &q->head[q->rear];
if (unlikely(ent->busy))
return NULL;
q->rear++;
if (unlikely(q->rear == qlen))
q->rear = 0;
return ent;
}
static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
{
if (WARN_ON(inc > length))
inc = length;
index += inc;
if (unlikely(index >= length))
index -= length;
return index;
}
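/*
 * Worked example (illustrative, with a hypothetical qlen of 1024): for
 * front == 1022 and inc == CPT_IQ_RESUME_MARGIN (512), modulo_inc()
 * returns 1022 + 512 - 1024 = 510, i.e. the index wraps around the ring.
 */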
static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
{
pentry->completion_addr = NULL;
pentry->info = NULL;
pentry->callback = NULL;
pentry->areq = NULL;
pentry->resume_sender = false;
pentry->busy = false;
}
static inline int setup_sgio_components(struct pci_dev *pdev,
struct otx2_cpt_buf_ptr *list,
int buf_count, u8 *buffer)
{
struct otx2_cpt_sglist_component *sg_ptr = NULL;
int ret = 0, i, j;
int components;
if (unlikely(!list)) {
dev_err(&pdev->dev, "Input list pointer is NULL\n");
return -EFAULT;
}
for (i = 0; i < buf_count; i++) {
if (unlikely(!list[i].vptr))
continue;
list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
list[i].size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
dev_err(&pdev->dev, "Dma mapping failed\n");
ret = -EIO;
goto sg_cleanup;
}
}
components = buf_count / 4;
sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
for (i = 0; i < components; i++) {
sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
sg_ptr++;
}
components = buf_count % 4;
switch (components) {
case 3:
sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
fallthrough;
case 2:
sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
fallthrough;
case 1:
sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
break;
default:
break;
}
return ret;
sg_cleanup:
for (j = 0; j < i; j++) {
if (list[j].dma_addr) {
dma_unmap_single(&pdev->dev, list[j].dma_addr,
list[j].size, DMA_BIDIRECTIONAL);
}
list[j].dma_addr = 0;
}
return ret;
}
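/*
 * Worked example (illustrative, not in the original file): for
 * buf_count == 6 the loop above emits one fully populated
 * otx2_cpt_sglist_component for buffers 0-3, and the switch then fills
 * len0/ptr0 and len1/ptr1 of a second component with buffers 4 and 5,
 * leaving its len2/len3 fields zero (the destination buffer comes from
 * the kzalloc() in info_create(), so it is already zeroed).
 */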
static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
struct otx2_cpt_req_info *req,
gfp_t gfp)
{
int align = OTX2_CPT_DMA_MINALIGN;
struct otx2_cpt_inst_info *info;
u32 dlen, align_dlen, info_len;
u16 g_sz_bytes, s_sz_bytes;
u32 total_mem_len;
if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
dev_err(&pdev->dev, "Error too many sg components\n");
return NULL;
}
g_sz_bytes = ((req->in_cnt + 3) / 4) *
sizeof(struct otx2_cpt_sglist_component);
s_sz_bytes = ((req->out_cnt + 3) / 4) *
sizeof(struct otx2_cpt_sglist_component);
dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
align_dlen = ALIGN(dlen, align);
info_len = ALIGN(sizeof(*info), align);
total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
return NULL;
info->dlen = dlen;
info->in_buffer = (u8 *)info + info_len;
((u16 *)info->in_buffer)[0] = req->out_cnt;
((u16 *)info->in_buffer)[1] = req->in_cnt;
((u16 *)info->in_buffer)[2] = 0;
((u16 *)info->in_buffer)[3] = 0;
cpu_to_be64s((u64 *)info->in_buffer);
/* Setup gather (input) components */
if (setup_sgio_components(pdev, req->in, req->in_cnt,
&info->in_buffer[8])) {
dev_err(&pdev->dev, "Failed to setup gather list\n");
goto destroy_info;
}
if (setup_sgio_components(pdev, req->out, req->out_cnt,
&info->in_buffer[8 + g_sz_bytes])) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
info->dma_len = total_mem_len - info_len;
info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
info->dma_len, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
goto destroy_info;
}
/*
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
info->completion_addr = info->in_buffer + align_dlen;
info->comp_baddr = info->dptr_baddr + align_dlen;
return info;
destroy_info:
otx2_cpt_info_destroy(pdev, info);
return NULL;
}
static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
struct otx2_cpt_pending_queue *pqueue,
struct otx2_cptlf_info *lf)
{
struct otx2_cptvf_request *cpt_req = &req->req;
struct otx2_cpt_pending_entry *pentry = NULL;
union otx2_cpt_ctrl_info *ctrl = &req->ctrl;
struct otx2_cpt_inst_info *info = NULL;
union otx2_cpt_res_s *result = NULL;
struct otx2_cpt_iq_command iq_cmd;
union otx2_cpt_inst_s cptinst;
int retry, ret = 0;
u8 resume_sender;
gfp_t gfp;
gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
if (unlikely(!otx2_cptlf_started(lf->lfs)))
return -ENODEV;
info = info_create(pdev, req, gfp);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Setting up cpt inst info failed");
return -ENOMEM;
}
cpt_req->dlen = info->dlen;
result = info->completion_addr;
result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
spin_lock_bh(&pqueue->lock);
pentry = get_free_pending_entry(pqueue, pqueue->qlen);
retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
while (unlikely(!pentry) && retry--) {
spin_unlock_bh(&pqueue->lock);
udelay(CPT_PENTRY_STEP);
spin_lock_bh(&pqueue->lock);
pentry = get_free_pending_entry(pqueue, pqueue->qlen);
}
if (unlikely(!pentry)) {
ret = -ENOSPC;
goto destroy_info;
}
/*
* Check if we are close to filling in entire pending queue,
* if so then tell the sender to stop/sleep by returning -EBUSY
* We do it only for context which can sleep (GFP_KERNEL)
*/
if (gfp == GFP_KERNEL &&
pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
pentry->resume_sender = true;
} else
pentry->resume_sender = false;
resume_sender = pentry->resume_sender;
pqueue->pending_count++;
pentry->completion_addr = info->completion_addr;
pentry->info = info;
pentry->callback = req->callback;
pentry->areq = req->areq;
pentry->busy = true;
info->pentry = pentry;
info->time_in = jiffies;
info->req = req;
/* Fill in the command */
iq_cmd.cmd.u = 0;
iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
iq_cmd.dptr = info->dptr_baddr;
iq_cmd.rptr = 0;
iq_cmd.cptr.u = 0;
iq_cmd.cptr.s.grp = ctrl->s.grp;
/* Fill in the CPT_INST_S type command for HW interpretation */
otx2_cpt_fill_inst(&cptinst, &iq_cmd, info->comp_baddr);
/* Print debug info if enabled */
otx2_cpt_dump_sg_list(pdev, req);
pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX2_CPT_INST_SIZE);
print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX2_CPT_INST_SIZE, false);
pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
cpt_req->dlen, false);
/* Send CPT command */
lf->lfs->ops->send_cmd(&cptinst, 1, lf);
/*
* We allocate and prepare pending queue entry in critical section
* together with submitting CPT instruction to CPT instruction queue
* to make sure that order of CPT requests is the same in both
* pending and instruction queues
*/
spin_unlock_bh(&pqueue->lock);
ret = resume_sender ? -EBUSY : -EINPROGRESS;
return ret;
destroy_info:
spin_unlock_bh(&pqueue->lock);
otx2_cpt_info_destroy(pdev, info);
return ret;
}
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
return process_request(lfs->pdev, req, &lfs->lf[cpu_num].pqueue,
&lfs->lf[cpu_num]);
}
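/*
 * Illustrative sketch, not part of the original driver: minimal submit
 * pattern against otx2_cpt_do_request(). The example_* name is
 * hypothetical; the real users are the skcipher/aead glue in
 * otx2_cptvf_algs.c.
 */
static inline int example_submit(struct pci_dev *pdev,
				 struct otx2_cpt_req_info *req, int cpu_num)
{
	int ret = otx2_cpt_do_request(pdev, req, cpu_num);

	/*
	 * -EINPROGRESS: queued; completion arrives via req->callback.
	 * -EBUSY: also queued, but the pending queue is nearly full, so the
	 * sender should pause until its callback is invoked with -EINPROGRESS.
	 */
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;	/* real error, request was not queued */
	return 0;
}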
static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
union otx2_cpt_res_s *cpt_status,
struct otx2_cpt_inst_info *info,
u32 *res_code)
{
u8 uc_ccode = lfs->ops->cpt_get_uc_compcode(cpt_status);
u8 ccode = lfs->ops->cpt_get_compcode(cpt_status);
struct pci_dev *pdev = lfs->pdev;
switch (ccode) {
case OTX2_CPT_COMP_E_FAULT:
dev_err(&pdev->dev,
"Request failed with DMA fault\n");
otx2_cpt_dump_sg_list(pdev, info->req);
break;
case OTX2_CPT_COMP_E_HWERR:
dev_err(&pdev->dev,
"Request failed with hardware error\n");
otx2_cpt_dump_sg_list(pdev, info->req);
break;
case OTX2_CPT_COMP_E_INSTERR:
dev_err(&pdev->dev,
"Request failed with instruction error\n");
otx2_cpt_dump_sg_list(pdev, info->req);
break;
case OTX2_CPT_COMP_E_NOTDONE:
/* check for timeout */
if (time_after_eq(jiffies, info->time_in +
CPT_COMMAND_TIMEOUT * HZ))
dev_warn(&pdev->dev,
"Request timed out 0x%p", info->req);
else if (info->extra_time < CPT_TIME_IN_RESET_COUNT) {
info->time_in = jiffies;
info->extra_time++;
}
return 1;
case OTX2_CPT_COMP_E_GOOD:
case OTX2_CPT_COMP_E_WARN:
/*
* Check microcode completion code, it is only valid
* when completion code is CPT_COMP_E::GOOD
*/
if (uc_ccode != OTX2_CPT_UCC_SUCCESS) {
/*
* If requested hmac is truncated and ucode returns
* s/g write length error then we report success
* because ucode writes as many bytes of calculated
* hmac as available in gather buffer and reports
* s/g write length error if number of bytes in gather
* buffer is less than full hmac size.
*/
if (info->req->is_trunc_hmac &&
uc_ccode == OTX2_CPT_UCC_SG_WRITE_LENGTH) {
*res_code = 0;
break;
}
dev_err(&pdev->dev,
"Request failed with software error code 0x%x\n",
cpt_status->s.uc_compcode);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}
/* Request has been processed with success */
*res_code = 0;
break;
default:
dev_err(&pdev->dev,
"Request returned invalid status %d\n", ccode);
break;
}
return 0;
}
static inline void process_pending_queue(struct otx2_cptlfs_info *lfs,
struct otx2_cpt_pending_queue *pqueue)
{
struct otx2_cpt_pending_entry *resume_pentry = NULL;
void (*callback)(int status, void *arg, void *req);
struct otx2_cpt_pending_entry *pentry = NULL;
union otx2_cpt_res_s *cpt_status = NULL;
struct otx2_cpt_inst_info *info = NULL;
struct otx2_cpt_req_info *req = NULL;
struct crypto_async_request *areq;
struct pci_dev *pdev = lfs->pdev;
u32 res_code, resume_index;
while (1) {
spin_lock_bh(&pqueue->lock);
pentry = &pqueue->head[pqueue->front];
if (WARN_ON(!pentry)) {
spin_unlock_bh(&pqueue->lock);
break;
}
res_code = -EINVAL;
if (unlikely(!pentry->busy)) {
spin_unlock_bh(&pqueue->lock);
break;
}
if (unlikely(!pentry->callback)) {
dev_err(&pdev->dev, "Callback NULL\n");
goto process_pentry;
}
info = pentry->info;
if (unlikely(!info)) {
dev_err(&pdev->dev, "Pending entry post arg NULL\n");
goto process_pentry;
}
req = info->req;
if (unlikely(!req)) {
dev_err(&pdev->dev, "Request NULL\n");
goto process_pentry;
}
cpt_status = pentry->completion_addr;
if (unlikely(!cpt_status)) {
dev_err(&pdev->dev, "Completion address NULL\n");
goto process_pentry;
}
if (cpt_process_ccode(lfs, cpt_status, info, &res_code)) {
spin_unlock_bh(&pqueue->lock);
return;
}
info->pdev = pdev;
process_pentry:
/*
* Check if we should inform sending side to resume
* We do it CPT_IQ_RESUME_MARGIN elements in advance before
* pending queue becomes empty
*/
resume_index = modulo_inc(pqueue->front, pqueue->qlen,
CPT_IQ_RESUME_MARGIN);
resume_pentry = &pqueue->head[resume_index];
if (resume_pentry &&
resume_pentry->resume_sender) {
resume_pentry->resume_sender = false;
callback = resume_pentry->callback;
areq = resume_pentry->areq;
if (callback) {
spin_unlock_bh(&pqueue->lock);
/*
* EINPROGRESS is an indication for sending
* side that it can resume sending requests
*/
callback(-EINPROGRESS, areq, info);
spin_lock_bh(&pqueue->lock);
}
}
callback = pentry->callback;
areq = pentry->areq;
free_pentry(pentry);
pqueue->pending_count--;
pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
spin_unlock_bh(&pqueue->lock);
/*
* Call callback after current pending entry has been
* processed, we don't do it if the callback pointer is
* invalid.
*/
if (callback)
callback(res_code, areq, info);
}
}
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
{
process_pending_queue(wqe->lfs,
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
return cptvf->lfs.kcrypto_eng_grp_num;
}
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Marvell. */
#include <linux/soc/marvell/octeontx2/asm.h>
#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf);
static struct cpt_hw_ops otx2_hw_ops = {
.send_cmd = otx2_cpt_send_cmd,
.cpt_get_compcode = otx2_cpt_get_compcode,
.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
};
static struct cpt_hw_ops cn10k_hw_ops = {
.send_cmd = cn10k_cpt_send_cmd,
.cpt_get_compcode = cn10k_cpt_get_compcode,
.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
};
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf)
{
void __iomem *lmtline = lf->lmtline;
u64 val = (lf->slot & 0x7FF);
u64 tar_addr = 0;
/* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
tar_addr |= (__force u64)lf->ioreg |
(((OTX2_CPT_INST_SIZE/16) - 1) & 0x7) << 4;
/*
* Make sure memory areas pointed in CPT_INST_S
* are flushed before the instruction is sent to CPT
*/
dma_wmb();
/* Copy CPT command to LMTLINE */
memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
cn10k_lmt_flush(val, tar_addr);
}
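/*
 * Worked example (illustrative): assuming OTX2_CPT_INST_SIZE is 64 bytes,
 * (64 / 16) - 1 = 3, so bits <6:4> of tar_addr are set to 3, i.e. the first
 * LMTST transfer is 4 units of 128 bits (one CPT_INST_S).
 */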
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
resource_size_t size;
u64 lmt_base;
if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
cptpf->lfs.ops = &otx2_hw_ops;
return 0;
}
cptpf->lfs.ops = &cn10k_hw_ops;
lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
if (!lmt_base) {
dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
return -ENOMEM;
}
size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
if (!cptpf->lfs.lmt_base) {
dev_err(&pdev->dev,
"Mapping of PF LMTLINE address failed\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
resource_size_t offset, size;
if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
cptvf->lfs.ops = &otx2_hw_ops;
return 0;
}
cptvf->lfs.ops = &cn10k_hw_ops;
offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
/* Map VF LMTLINE region */
cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
if (!cptvf->lfs.lmt_base) {
dev_err(&pdev->dev, "Unable to map BAR4\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
| linux-master | drivers/crypto/marvell/octeontx2/cn10k_cpt.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"
/* Fastpath ipsec opcode with inplace processing */
#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
#define cpt_inline_rx_opcode(pdev) \
({ \
u8 opcode; \
if (is_dev_otx2(pdev)) \
opcode = CPT_INLINE_RX_OPCODE; \
else \
opcode = CN10K_CPT_INLINE_RX_OPCODE; \
(opcode); \
})
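/*
 * Worked example (illustrative): on an OcteonTX2 (CN9xxx) device the macro
 * above evaluates to 0x26 | (1 << 6) = 0x66, while on CN10K it yields
 * 0x29 | (1 << 6) = 0x69.
 */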
/*
 * CPT PF driver version. It will be incremented by 1 for every feature
* addition in CPT mailbox messages.
*/
#define OTX2_CPT_PF_DRV_VERSION 0x1
static int forward_to_af(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req, int size)
{
struct mbox_msghdr *msg;
int ret;
mutex_lock(&cptpf->lock);
msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
if (msg == NULL) {
mutex_unlock(&cptpf->lock);
return -ENOMEM;
}
memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
(uint8_t *)req + sizeof(struct mbox_msghdr), size);
msg->id = req->id;
msg->pcifunc = req->pcifunc;
msg->sig = req->sig;
msg->ver = req->ver;
ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
/* Error code -EIO indicates there is a communication failure
 * to the AF. The rest of the error codes indicate that the AF
 * processed the VF messages and set the error codes in response
 * messages (if any), so simply forward the responses to the VF.
*/
if (ret == -EIO) {
dev_warn(&cptpf->pdev->dev,
"AF not responding to VF%d messages\n", vf->vf_id);
mutex_unlock(&cptpf->lock);
return ret;
}
mutex_unlock(&cptpf->lock);
return 0;
}
static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req)
{
struct otx2_cpt_caps_rsp *rsp;
rsp = (struct otx2_cpt_caps_rsp *)
otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
sizeof(*rsp));
if (!rsp)
return -ENOMEM;
rsp->hdr.id = MBOX_MSG_GET_CAPS;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
rsp->cpt_revision = cptpf->pdev->revision;
memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
return 0;
}
static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req)
{
struct otx2_cpt_egrp_num_msg *grp_req;
struct otx2_cpt_egrp_num_rsp *rsp;
grp_req = (struct otx2_cpt_egrp_num_msg *)req;
rsp = (struct otx2_cpt_egrp_num_rsp *)
otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
if (!rsp)
return -ENOMEM;
rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->eng_type = grp_req->eng_type;
rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
grp_req->eng_type);
return 0;
}
static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req)
{
struct otx2_cpt_kvf_limits_rsp *rsp;
rsp = (struct otx2_cpt_kvf_limits_rsp *)
otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
if (!rsp)
return -ENOMEM;
rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->kvf_limits = cptpf->kvf_limits;
return 0;
}
static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
int sso_pf_func, u8 slot)
{
struct cpt_inline_ipsec_cfg_msg *req;
struct pci_dev *pdev = cptpf->pdev;
req = (struct cpt_inline_ipsec_cfg_msg *)
otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
sizeof(*req), sizeof(struct msg_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
memset(req, 0, sizeof(*req));
req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
req->dir = CPT_INLINE_INBOUND;
req->slot = slot;
req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
req->sso_pf_func = sso_pf_func;
req->enable = 1;
return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
}
static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
struct otx2_cpt_rx_inline_lf_cfg *req)
{
struct nix_inline_ipsec_cfg *nix_req;
struct pci_dev *pdev = cptpf->pdev;
int ret;
nix_req = (struct nix_inline_ipsec_cfg *)
otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
sizeof(*nix_req),
sizeof(struct msg_rsp));
if (nix_req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
memset(nix_req, 0, sizeof(*nix_req));
nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
nix_req->enable = 1;
if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
else
nix_req->cpt_credit = req->credit - 1;
nix_req->gen_cfg.egrp = egrp;
if (req->opcode)
nix_req->gen_cfg.opcode = req->opcode;
else
nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
nix_req->gen_cfg.param1 = req->param1;
nix_req->gen_cfg.param2 = req->param2;
nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
nix_req->inst_qsel.cpt_slot = 0;
ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
if (ret)
return ret;
if (cptpf->has_cpt1) {
ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
if (ret)
return ret;
}
return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
}
static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *req)
{
struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
u8 egrp;
int ret;
cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
if (cptpf->lfs.lfs_num) {
dev_err(&cptpf->pdev->dev,
"LF is already configured for RX inline ipsec.\n");
return -EEXIST;
}
/*
 * Allow LFs to execute requests destined only for the IE_TYPES engine
 * group and set the queue priority of each LF to high
*/
egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
dev_err(&cptpf->pdev->dev,
"Engine group for inline ipsec is not available\n");
return -ENOENT;
}
otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
&cptpf->afpf_mbox, BLKADDR_CPT0);
ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
1);
if (ret) {
dev_err(&cptpf->pdev->dev,
"LF configuration failed for RX inline ipsec.\n");
return ret;
}
if (cptpf->has_cpt1) {
cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
cptpf->reg_base, &cptpf->afpf_mbox,
BLKADDR_CPT1);
ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret) {
dev_err(&cptpf->pdev->dev,
"LF configuration failed for RX inline ipsec.\n");
goto lf_cleanup;
}
cptpf->rsrc_req_blkaddr = 0;
}
ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
if (ret)
goto lf1_cleanup;
return 0;
lf1_cleanup:
otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
lf_cleanup:
otx2_cptlf_shutdown(&cptpf->lfs);
return ret;
}
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req, int size)
{
int err = 0;
/* Check if msg is valid, if not reply with an invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG)
goto inval_msg;
switch (req->id) {
case MBOX_MSG_GET_ENG_GRP_NUM:
err = handle_msg_get_eng_grp_num(cptpf, vf, req);
break;
case MBOX_MSG_GET_CAPS:
err = handle_msg_get_caps(cptpf, vf, req);
break;
case MBOX_MSG_GET_KVF_LIMITS:
err = handle_msg_kvf_limits(cptpf, vf, req);
break;
case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
break;
default:
err = forward_to_af(cptpf, vf, req, size);
break;
}
return err;
inval_msg:
otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
return err;
}
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptpf_dev *cptpf = arg;
struct otx2_cptvf_info *vf;
int i, vf_idx;
u64 intr;
/*
* Check which VF has raised an interrupt and schedule
* corresponding work queue to process the messages
*/
for (i = 0; i < 2; i++) {
/* Read the interrupt bits */
intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INTX(i));
for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
vf = &cptpf->vf[vf_idx];
if (intr & (1ULL << vf->intr_idx)) {
queue_work(cptpf->vfpf_mbox_wq,
&vf->vfpf_mbox_work);
/* Clear the interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
0, RVU_PF_VFPF_MBOX_INTX(i),
BIT_ULL(vf->intr_idx));
}
}
}
return IRQ_HANDLED;
}
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
{
struct otx2_cptpf_dev *cptpf;
struct otx2_cptvf_info *vf;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *req_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
int offset, i, err;
vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
cptpf = vf->cptpf;
mbox = &cptpf->vfpf_mbox;
/* sync with mbox memory region */
smp_rmb();
mdev = &mbox->dev[vf->vf_id];
/* Process received mbox messages */
req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
for (i = 0; i < req_hdr->num_msgs; i++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
/* Set which VF sent this message based on mbox IRQ */
msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
err = cptpf_handle_vf_req(cptpf, vf, msg,
msg->next_msgoff - offset);
/*
* Behave as the AF, drop the msg if there is
* no memory, timeout handling also goes here
*/
if (err == -ENOMEM || err == -EIO)
break;
offset = msg->next_msgoff;
/* Write barrier required for VF responses which are handled by
* PF driver and not forwarded to AF.
*/
smp_wmb();
}
/* Send mbox responses to VF */
if (mdev->num_msgs)
otx2_mbox_msg_send(mbox, vf->vf_id);
}
irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptpf_dev *cptpf = arg;
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
u64 intr;
/* Read the interrupt bits */
intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
if (intr & 0x1ULL) {
mbox = &cptpf->afpf_mbox;
mdev = &mbox->dev[0];
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs)
/* Schedule work queue function to process the MBOX request */
queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
mbox = &cptpf->afpf_mbox_up;
mdev = &mbox->dev[0];
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs)
/* Schedule work queue function to process the MBOX request */
queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
/* Clear and ack the interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
0x1ULL);
}
return IRQ_HANDLED;
}
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg)
{
struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct device *dev = &cptpf->pdev->dev;
struct cpt_rd_wr_reg_msg *rsp_rd_wr;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
return;
}
if (msg->sig != OTX2_MBOX_RSP_SIG) {
dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
msg->sig, msg->id);
return;
}
if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
lfs = &cptpf->cpt1_lfs;
switch (msg->id) {
case MBOX_MSG_READY:
cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
RVU_PFVF_PF_MASK;
break;
case MBOX_MSG_CPT_RD_WR_REGISTER:
rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
if (msg->rc) {
dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
msg->rc);
return;
}
if (!rsp_rd_wr->is_write)
*rsp_rd_wr->ret_val = rsp_rd_wr->val;
break;
case MBOX_MSG_ATTACH_RESOURCES:
if (!msg->rc)
lfs->are_lfs_attached = 1;
break;
case MBOX_MSG_DETACH_RESOURCES:
if (!msg->rc)
lfs->are_lfs_attached = 0;
break;
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
break;
default:
dev_err(dev,
"Unsupported msg %d received.\n", msg->id);
break;
}
}
static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
int vf_id, int size)
{
struct otx2_mbox *vfpf_mbox;
struct mbox_msghdr *fwd;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(&cptpf->pdev->dev,
"MBOX msg with unknown ID %d\n", msg->id);
return;
}
if (msg->sig != OTX2_MBOX_RSP_SIG) {
dev_err(&cptpf->pdev->dev,
"MBOX msg with wrong signature %x, ID %d\n",
msg->sig, msg->id);
return;
}
vfpf_mbox = &cptpf->vfpf_mbox;
vf_id--;
if (vf_id >= cptpf->enabled_vfs) {
dev_err(&cptpf->pdev->dev,
"MBOX msg to unknown VF: %d >= %d\n",
vf_id, cptpf->enabled_vfs);
return;
}
if (msg->id == MBOX_MSG_VF_FLR)
return;
fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
if (!fwd) {
dev_err(&cptpf->pdev->dev,
"Forwarding to VF%d failed.\n", vf_id);
return;
}
memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
(uint8_t *)msg + sizeof(struct mbox_msghdr), size);
fwd->id = msg->id;
fwd->pcifunc = msg->pcifunc;
fwd->sig = msg->sig;
fwd->ver = msg->ver;
fwd->rc = msg->rc;
}
/* Handle mailbox messages received from AF */
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
{
struct otx2_cptpf_dev *cptpf;
struct otx2_mbox *afpf_mbox;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
int offset, vf_id, i;
cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
afpf_mbox = &cptpf->afpf_mbox;
mdev = &afpf_mbox->dev[0];
/* Sync mbox data into memory */
smp_wmb();
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (i = 0; i < rsp_hdr->num_msgs; i++) {
msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
offset);
vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
RVU_PFVF_FUNC_MASK;
if (vf_id > 0)
forward_to_vf(cptpf, msg, vf_id,
msg->next_msgoff - offset);
else
process_afpf_mbox_msg(cptpf, msg);
offset = msg->next_msgoff;
/* Sync VF response ready to be sent */
smp_wmb();
mdev->msgs_acked++;
}
otx2_mbox_reset(afpf_mbox, 0);
}
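/*
 * Handle a CPT_INST_LMTST UP message from AF: submit the CPT
 * instruction on the first LF (if any LFs are attached) and queue an
 * acknowledgement on the AF-PF UP mailbox.
 */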
static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg)
{
struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct msg_rsp *rsp;
if (cptpf->lfs.lfs_num)
lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
&lfs->lf[0]);
rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
sizeof(*rsp));
if (!rsp)
return;
rsp->hdr.id = msg->id;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = 0;
rsp->hdr.rc = 0;
}
static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg)
{
if (msg->id >= MBOX_MSG_MAX) {
dev_err(&cptpf->pdev->dev,
"MBOX msg with unknown ID %d\n", msg->id);
return;
}
switch (msg->id) {
case MBOX_MSG_CPT_INST_LMTST:
handle_msg_cpt_inst_lmtst(cptpf, msg);
break;
default:
otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
}
}
void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
{
struct otx2_cptpf_dev *cptpf;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
int offset, i;
cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
mbox = &cptpf->afpf_mbox_up;
mdev = &mbox->dev[0];
/* Sync mbox data into memory */
smp_wmb();
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (i = 0; i < rsp_hdr->num_msgs; i++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
process_afpf_mbox_up_msg(cptpf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
otx2_mbox_msg_send(mbox, 0);
}
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32
static DEFINE_MUTEX(mutex);
static int is_crypto_registered;
struct cpt_device_desc {
struct pci_dev *dev;
int num_queues;
};
struct cpt_device_table {
atomic_t count;
struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
};
static struct cpt_device_table se_devices = {
.count = ATOMIC_INIT(0)
};
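/*
 * Pick the SE (symmetric engine) device to use and map the current CPU
 * to one of its instruction queues.
 */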
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count;
count = atomic_read(&se_devices.count);
if (count < 1)
return -ENODEV;
*cpu_num = get_cpu();
/*
 * On the OcteonTX2 platform a CPT instruction queue is bound to each
 * local function (LF), and LFs can be attached to a PF or a VF,
 * therefore we always use the first device. We get maximum
 * performance if one CPT queue is available for each CPU,
 * otherwise CPT queues have to be shared between CPUs.
 */
if (*cpu_num >= se_devices.desc[0].num_queues)
*cpu_num %= se_devices.desc[0].num_queues;
*pdev = se_devices.desc[0].dev;
put_cpu();
return 0;
}
static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
{
struct otx2_cpt_req_ctx *rctx;
struct aead_request *req;
struct crypto_aead *tfm;
req = container_of(cpt_req->areq, struct aead_request, base);
tfm = crypto_aead_reqtfm(req);
rctx = aead_request_ctx_dma(req);
if (memcmp(rctx->fctx.hmac.s.hmac_calc,
rctx->fctx.hmac.s.hmac_recv,
crypto_aead_authsize(tfm)) != 0)
return -EBADMSG;
return 0;
}
static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
{
struct otx2_cpt_inst_info *inst_info = arg2;
struct crypto_async_request *areq = arg1;
struct otx2_cpt_req_info *cpt_req;
struct pci_dev *pdev;
if (inst_info) {
cpt_req = inst_info->req;
if (!status) {
/*
 * When the selected cipher is NULL we need to manually
 * verify whether the calculated HMAC value matches the
 * received HMAC value.
 */
if (cpt_req->req_type ==
OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
!cpt_req->is_enc)
status = validate_hmac_cipher_null(cpt_req);
}
pdev = inst_info->pdev;
otx2_cpt_info_destroy(pdev, inst_info);
}
if (areq)
crypto_request_complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
{
struct otx2_cpt_req_info *req_info;
struct otx2_cpt_req_ctx *rctx;
struct skcipher_request *sreq;
struct crypto_skcipher *stfm;
struct otx2_cpt_enc_ctx *ctx;
u32 start, ivsize;
sreq = container_of(areq, struct skcipher_request, base);
stfm = crypto_skcipher_reqtfm(sreq);
ctx = crypto_skcipher_ctx(stfm);
if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
ctx->cipher_type == OTX2_CPT_DES3_CBC) {
rctx = skcipher_request_ctx_dma(sreq);
req_info = &rctx->cpt_req;
ivsize = crypto_skcipher_ivsize(stfm);
start = sreq->cryptlen - ivsize;
if (req_info->is_enc) {
scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
ivsize, 0);
} else {
if (sreq->src != sreq->dst) {
scatterwalk_map_and_copy(sreq->iv, sreq->src,
start, ivsize, 0);
} else {
memcpy(sreq->iv, req_info->iv_out, ivsize);
kfree(req_info->iv_out);
}
}
}
}
static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
struct otx2_cpt_inst_info *inst_info = arg2;
struct crypto_async_request *areq = arg1;
struct pci_dev *pdev;
if (areq) {
if (!status)
output_iv_copyback(areq);
if (inst_info) {
pdev = inst_info->pdev;
otx2_cpt_info_destroy(pdev, inst_info);
}
crypto_request_complete(areq, status);
}
}
static inline void update_input_data(struct otx2_cpt_req_info *req_info,
struct scatterlist *inp_sg,
u32 nbytes, u32 *argcnt)
{
req_info->req.dlen += nbytes;
while (nbytes) {
u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
u8 *ptr = sg_virt(inp_sg);
req_info->in[*argcnt].vptr = (void *)ptr;
req_info->in[*argcnt].size = len;
nbytes -= len;
++(*argcnt);
inp_sg = sg_next(inp_sg);
}
}
static inline void update_output_data(struct otx2_cpt_req_info *req_info,
struct scatterlist *outp_sg,
u32 offset, u32 nbytes, u32 *argcnt)
{
u32 len, sg_len;
u8 *ptr;
req_info->rlen += nbytes;
while (nbytes) {
sg_len = outp_sg->length - offset;
len = (nbytes < sg_len) ? nbytes : sg_len;
ptr = sg_virt(outp_sg);
req_info->out[*argcnt].vptr = (void *) (ptr + offset);
req_info->out[*argcnt].size = len;
nbytes -= len;
++(*argcnt);
offset = 0;
outp_sg = sg_next(outp_sg);
}
}
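/*
 * Build the control word and FLEXICRYPTO context for an skcipher
 * request and add them to the CPT input list. For in-place CBC
 * decryption the last ciphertext block is saved so it can be copied
 * back as the output IV on completion.
 */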
static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
int ivsize = crypto_skcipher_ivsize(stfm);
u32 start = req->cryptlen - ivsize;
gfp_t flags;
flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
req_info->ctrl.s.se_req = 1;
req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
if (enc) {
req_info->req.opcode.s.minor = 2;
} else {
req_info->req.opcode.s.minor = 3;
if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
req->src == req->dst) {
req_info->iv_out = kmalloc(ivsize, flags);
if (!req_info->iv_out)
return -ENOMEM;
scatterwalk_map_and_copy(req_info->iv_out, req->src,
start, ivsize, 0);
}
}
/* Encryption data length */
req_info->req.param1 = req->cryptlen;
/* Authentication data length */
req_info->req.param2 = 0;
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
if (ctx->cipher_type == OTX2_CPT_AES_XTS)
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
else
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
cpu_to_be64s(&fctx->enc.enc_ctrl.u);
/*
 * Store the packet data information in the offset
 * control word (first 8 bytes).
 */
req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
req_info->in[*argcnt].size = CONTROL_WORD_LEN;
req_info->req.dlen += CONTROL_WORD_LEN;
++(*argcnt);
req_info->in[*argcnt].vptr = (u8 *)fctx;
req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
++(*argcnt);
return 0;
}
static inline int create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
int ret;
ret = create_ctx_hdr(req, enc, &argcnt);
if (ret)
return ret;
update_input_data(req_info, req->src, req->cryptlen, &argcnt);
req_info->in_cnt = argcnt;
return 0;
}
static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
/*
 * OUTPUT buffer processing
 * AES encryption/decryption output is received in the
 * following format:
 *
 * |-------IV-------|------ENCRYPTED/DECRYPTED DATA------|
 * [ 16 bytes (CBC) |      request enc/dec data len      ]
 */
update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
req_info->out_cnt = argcnt;
}
static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
int ret;
if (ctx->fbk_cipher) {
skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
skcipher_request_set_callback(&rctx->sk_fbk_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
req->dst, req->cryptlen, req->iv);
ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
crypto_skcipher_decrypt(&rctx->sk_fbk_req);
} else {
ret = -EINVAL;
}
return ret;
}
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
struct pci_dev *pdev;
int status, cpu_num;
if (req->cryptlen == 0)
return 0;
if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
return -EINVAL;
if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
return skcipher_do_fallback(req, enc);
/* Clear control words */
rctx->ctrl_word.flags = 0;
rctx->fctx.enc.enc_ctrl.u = 0;
status = create_input_list(req, enc, enc_iv_len);
if (status)
return status;
create_output_list(req, enc_iv_len);
status = get_se_device(&pdev, &cpu_num);
if (status)
return status;
req_info->callback = otx2_cpt_skcipher_callback;
req_info->areq = &req->base;
req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
/*
 * The send is asynchronous; once the request is
 * completed the driver notifies us through the
 * registered callback function.
 */
status = otx2_cpt_do_request(pdev, req_info, cpu_num);
return status;
}
static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
{
return cpt_enc_dec(req, true);
}
static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
{
return cpt_enc_dec(req, false);
}
static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
const u8 *key2 = key + (keylen / 2);
const u8 *key1 = key;
int ret;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
ctx->key_len = keylen;
ctx->enc_align_len = 1;
memcpy(ctx->enc_key, key1, keylen / 2);
memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
ctx->cipher_type = OTX2_CPT_AES_XTS;
switch (ctx->key_len) {
case 2 * AES_KEYSIZE_128:
ctx->key_type = OTX2_CPT_AES_128_BIT;
break;
case 2 * AES_KEYSIZE_192:
ctx->key_type = OTX2_CPT_AES_192_BIT;
break;
case 2 * AES_KEYSIZE_256:
ctx->key_type = OTX2_CPT_AES_256_BIT;
break;
default:
return -EINVAL;
}
return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}
static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
u32 keylen, u8 cipher_type)
{
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;
ctx->key_len = keylen;
ctx->cipher_type = cipher_type;
ctx->enc_align_len = 8;
memcpy(ctx->enc_key, key, keylen);
return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}
static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
u32 keylen, u8 cipher_type)
{
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (keylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX2_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX2_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX2_CPT_AES_256_BIT;
break;
default:
return -EINVAL;
}
if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
ctx->enc_align_len = 16;
else
ctx->enc_align_len = 1;
ctx->key_len = keylen;
ctx->cipher_type = cipher_type;
memcpy(ctx->enc_key, key, keylen);
return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}
static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
}
static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
}
static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
}
static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
}
static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
struct crypto_alg *alg)
{
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->fbk_cipher =
crypto_alloc_skcipher(alg->cra_name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fbk_cipher)) {
pr_err("%s() failed to allocate fallback for %s\n",
__func__, alg->cra_name);
return PTR_ERR(ctx->fbk_cipher);
}
}
return 0;
}
static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
{
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct crypto_alg *alg = tfm->__crt_alg;
memset(ctx, 0, sizeof(*ctx));
/*
* Additional memory for skcipher_request is
* allocated since the cryptd daemon uses
* this memory for request_ctx information
*/
crypto_skcipher_set_reqsize_dma(
stfm, sizeof(struct otx2_cpt_req_ctx) +
sizeof(struct skcipher_request));
return cpt_skcipher_fallback_init(ctx, alg);
}
static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
{
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fbk_cipher) {
crypto_free_skcipher(ctx->fbk_cipher);
ctx->fbk_cipher = NULL;
}
}
static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
struct crypto_alg *alg)
{
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->fbk_cipher =
crypto_alloc_aead(alg->cra_name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fbk_cipher)) {
pr_err("%s() failed to allocate fallback for %s\n",
__func__, alg->cra_name);
return PTR_ERR(ctx->fbk_cipher);
}
}
return 0;
}
static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
struct crypto_alg *alg = tfm->__crt_alg;
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
/*
 * When the selected cipher is NULL we use the HMAC opcode instead of
 * the FLEXICRYPTO opcode, therefore we don't need the HASH algorithms
 * for calculating ipad and opad.
 */
if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
switch (ctx->mac_type) {
case OTX2_CPT_SHA1:
ctx->hashalg = crypto_alloc_shash("sha1", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX2_CPT_SHA256:
ctx->hashalg = crypto_alloc_shash("sha256", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX2_CPT_SHA384:
ctx->hashalg = crypto_alloc_shash("sha384", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX2_CPT_SHA512:
ctx->hashalg = crypto_alloc_shash("sha512", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
}
}
switch (ctx->cipher_type) {
case OTX2_CPT_AES_CBC:
case OTX2_CPT_AES_ECB:
ctx->enc_align_len = 16;
break;
case OTX2_CPT_DES3_CBC:
case OTX2_CPT_DES3_ECB:
ctx->enc_align_len = 8;
break;
case OTX2_CPT_AES_GCM:
case OTX2_CPT_CIPHER_NULL:
ctx->enc_align_len = 1;
break;
}
crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
return cpt_aead_fallback_init(ctx, alg);
}
static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
}
static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
}
static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
}
static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
}
static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
}
static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
}
static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
}
static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
}
static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
}
static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
kfree(ctx->ipad);
kfree(ctx->opad);
if (ctx->hashalg)
crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
if (ctx->fbk_cipher) {
crypto_free_aead(ctx->fbk_cipher);
ctx->fbk_cipher = NULL;
}
}
static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
if (crypto_rfc4106_check_authsize(authsize))
return -EINVAL;
tfm->authsize = authsize;
/* Set authsize for fallback case */
if (ctx->fbk_cipher)
ctx->fbk_cipher->authsize = authsize;
return 0;
}
static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
tfm->authsize = authsize;
return 0;
}
static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
ctx->is_trunc_hmac = true;
tfm->authsize = authsize;
return 0;
}
static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
struct otx2_cpt_sdesc *sdesc;
int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc)
return NULL;
sdesc->shash.tfm = alg;
return sdesc;
}
static inline void swap_data32(void *buf, u32 len)
{
cpu_to_be32_array(buf, buf, len / 4);
}
static inline void swap_data64(void *buf, u32 len)
{
u64 *src = buf;
int i = 0;
for (i = 0 ; i < len / 8; i++, src++)
cpu_to_be64s(src);
}
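/*
 * Byte-swap the exported partial hash state per digest word size and
 * copy it into the ipad/opad buffer used by the request context.
 */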
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
struct sha1_state *sha1;
switch (mac_type) {
case OTX2_CPT_SHA1:
sha1 = (struct sha1_state *) in_pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX2_CPT_SHA256:
sha256 = (struct sha256_state *) in_pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX2_CPT_SHA384:
case OTX2_CPT_SHA512:
sha512 = (struct sha512_state *) in_pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
return -EINVAL;
}
return 0;
}
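/*
 * Precompute the inner and outer HMAC partial hashes (ipad/opad) with
 * the software shash algorithm and store them in the AEAD context.
 */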
static int aead_hmac_init(struct crypto_aead *cipher)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
int authkeylen = ctx->auth_key_len;
u8 *ipad = NULL, *opad = NULL;
int ret = 0, icount = 0;
ctx->sdesc = alloc_sdesc(ctx->hashalg);
if (!ctx->sdesc)
return -ENOMEM;
ctx->ipad = kzalloc(bs, GFP_KERNEL);
if (!ctx->ipad) {
ret = -ENOMEM;
goto calc_fail;
}
ctx->opad = kzalloc(bs, GFP_KERNEL);
if (!ctx->opad) {
ret = -ENOMEM;
goto calc_fail;
}
ipad = kzalloc(state_size, GFP_KERNEL);
if (!ipad) {
ret = -ENOMEM;
goto calc_fail;
}
opad = kzalloc(state_size, GFP_KERNEL);
if (!opad) {
ret = -ENOMEM;
goto calc_fail;
}
if (authkeylen > bs) {
ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
authkeylen, ipad);
if (ret)
goto calc_fail;
authkeylen = ds;
} else {
memcpy(ipad, ctx->key, authkeylen);
}
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
for (icount = 0; icount < bs; icount++) {
ipad[icount] ^= 0x36;
opad[icount] ^= 0x5c;
}
/*
 * The partial hash state computed by the software
 * algorithm is exported for IPAD & OPAD.
 */
/* IPAD Calculation */
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
if (ret)
goto calc_fail;
/* OPAD Calculation */
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
ret = copy_pad(ctx->mac_type, ctx->opad, opad);
if (ret)
goto calc_fail;
kfree(ipad);
kfree(opad);
return 0;
calc_fail:
kfree(ctx->ipad);
ctx->ipad = NULL;
kfree(ctx->opad);
ctx->opad = NULL;
kfree(ipad);
kfree(opad);
kfree(ctx->sdesc);
ctx->sdesc = NULL;
return ret;
}
static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
if (!RTA_OK(rta, keylen))
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
return -EINVAL;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
return -EINVAL;
if (keylen > OTX2_CPT_MAX_KEY_SIZE)
return -EINVAL;
authkeylen = keylen - enckeylen;
memcpy(ctx->key, key, keylen);
switch (enckeylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX2_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX2_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX2_CPT_AES_256_BIT;
break;
default:
/* Invalid key length */
return -EINVAL;
}
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = authkeylen;
return aead_hmac_init(cipher);
}
static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
struct rtattr *rta = (void *)key;
int enckeylen = 0;
if (!RTA_OK(rta, keylen))
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
return -EINVAL;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (enckeylen != 0)
return -EINVAL;
if (keylen > OTX2_CPT_MAX_KEY_SIZE)
return -EINVAL;
memcpy(ctx->key, key, keylen);
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = keylen;
return 0;
}
static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
/*
* For aes gcm we expect to get encryption key (16, 24, 32 bytes)
* and salt (4 bytes)
*/
switch (keylen) {
case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
ctx->key_type = OTX2_CPT_AES_128_BIT;
ctx->enc_key_len = AES_KEYSIZE_128;
break;
case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
ctx->key_type = OTX2_CPT_AES_192_BIT;
ctx->enc_key_len = AES_KEYSIZE_192;
break;
case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
ctx->key_type = OTX2_CPT_AES_256_BIT;
ctx->enc_key_len = AES_KEYSIZE_256;
break;
default:
/* Invalid key and salt length */
return -EINVAL;
}
/* Store encryption key and salt */
memcpy(ctx->key, key, keylen);
return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
}
static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
u32 *argcnt)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
int mac_len = crypto_aead_authsize(tfm);
int ds;
rctx->ctrl_word.e.enc_data_offset = req->assoclen;
switch (ctx->cipher_type) {
case OTX2_CPT_AES_CBC:
if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
return -EINVAL;
fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
/* Copy encryption key to context */
memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
ctx->enc_key_len);
/* Copy IV to context */
memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
ds = crypto_shash_digestsize(ctx->hashalg);
if (ctx->mac_type == OTX2_CPT_SHA384)
ds = SHA512_DIGEST_SIZE;
if (ctx->ipad)
memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
if (ctx->opad)
memcpy(fctx->hmac.e.opad, ctx->opad, ds);
break;
case OTX2_CPT_AES_GCM:
if (crypto_ipsec_check_assoclen(req->assoclen))
return -EINVAL;
fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
/* Copy encryption key to context */
memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
/* Copy salt to context */
memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
AES_GCM_SALT_SIZE);
rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
break;
default:
/* Unknown cipher type */
return -EINVAL;
}
cpu_to_be64s(&rctx->ctrl_word.flags);
req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
req_info->ctrl.s.se_req = 1;
req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
if (enc) {
req_info->req.opcode.s.minor = 2;
req_info->req.param1 = req->cryptlen;
req_info->req.param2 = req->cryptlen + req->assoclen;
} else {
req_info->req.opcode.s.minor = 3;
req_info->req.param1 = req->cryptlen - mac_len;
req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
}
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
fctx->enc.enc_ctrl.e.mac_len = mac_len;
cpu_to_be64s(&fctx->enc.enc_ctrl.u);
/*
 * Store the packet data information in the offset
 * control word (first 8 bytes).
 */
req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
req_info->in[*argcnt].size = CONTROL_WORD_LEN;
req_info->req.dlen += CONTROL_WORD_LEN;
++(*argcnt);
req_info->in[*argcnt].vptr = (u8 *)fctx;
req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
++(*argcnt);
return 0;
}
static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
u32 enc)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
req_info->ctrl.s.se_req = 1;
req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
req_info->is_trunc_hmac = ctx->is_trunc_hmac;
req_info->req.opcode.s.minor = 0;
req_info->req.param1 = ctx->auth_key_len;
req_info->req.param2 = ctx->mac_type << 8;
/* Add authentication key */
req_info->in[*argcnt].vptr = ctx->key;
req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
req_info->req.dlen += round_up(ctx->auth_key_len, 8);
++(*argcnt);
}
static inline int create_aead_input_list(struct aead_request *req, u32 enc)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen = req->cryptlen + req->assoclen;
u32 status, argcnt = 0;
status = create_aead_ctx_hdr(req, enc, &argcnt);
if (status)
return status;
update_input_data(req_info, req->src, inputlen, &argcnt);
req_info->in_cnt = argcnt;
return 0;
}
static inline void create_aead_output_list(struct aead_request *req, u32 enc,
u32 mac_len)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0, outputlen = 0;
if (enc)
outputlen = req->cryptlen + req->assoclen + mac_len;
else
outputlen = req->cryptlen + req->assoclen - mac_len;
update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
req_info->out_cnt = argcnt;
}
static inline void create_aead_null_input_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen, argcnt = 0;
if (enc)
inputlen = req->cryptlen + req->assoclen;
else
inputlen = req->cryptlen + req->assoclen - mac_len;
create_hmac_ctx_hdr(req, &argcnt, enc);
update_input_data(req_info, req->src, inputlen, &argcnt);
req_info->in_cnt = argcnt;
}
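/*
 * Build the output list for a NULL-cipher (HMAC only) request: copy the
 * payload to the destination when src and dst differ, then either set
 * up the HMAC output after the payload (encryption) or stash the
 * received HMAC for comparison against the calculated one (decryption).
 */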
static inline int create_aead_null_output_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct scatterlist *dst;
u8 *ptr = NULL;
int argcnt = 0, status, offset;
u32 inputlen;
if (enc)
inputlen = req->cryptlen + req->assoclen;
else
inputlen = req->cryptlen + req->assoclen - mac_len;
/*
* If source and destination are different
* then copy payload to destination
*/
if (req->src != req->dst) {
ptr = kmalloc(inputlen, (req_info->areq->flags &
CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!ptr)
return -ENOMEM;
status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
inputlen);
if (status != inputlen) {
status = -EINVAL;
goto error_free;
}
status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
inputlen);
if (status != inputlen) {
status = -EINVAL;
goto error_free;
}
kfree(ptr);
}
if (enc) {
/*
 * In the encryption case the HMAC needs
 * to be appended after the payload.
 */
dst = req->dst;
offset = inputlen;
while (offset >= dst->length) {
offset -= dst->length;
dst = sg_next(dst);
if (!dst)
return -ENOENT;
}
update_output_data(req_info, dst, offset, mac_len, &argcnt);
} else {
/*
 * In the decryption case the HMAC calculated over the received
 * payload needs to be compared with the HMAC that was received.
 */
status = sg_copy_buffer(req->src, sg_nents(req->src),
rctx->fctx.hmac.s.hmac_recv, mac_len,
inputlen, true);
if (status != mac_len)
return -EINVAL;
req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
req_info->out[argcnt].size = mac_len;
argcnt++;
}
req_info->out_cnt = argcnt;
return 0;
error_free:
kfree(ptr);
return status;
}
static int aead_do_fallback(struct aead_request *req, bool is_enc)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
int ret;
if (ctx->fbk_cipher) {
/* Store the cipher tfm and then use the fallback tfm */
aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
aead_request_set_callback(&rctx->fbk_req, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(&rctx->fbk_req, req->src,
req->dst, req->cryptlen, req->iv);
aead_request_set_ad(&rctx->fbk_req, req->assoclen);
ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
crypto_aead_decrypt(&rctx->fbk_req);
} else {
ret = -EINVAL;
}
return ret;
}
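/*
 * Common AEAD encrypt/decrypt path: build the input/output lists for
 * the requested mode, fall back to the software implementation for
 * unsupported lengths and submit the request to the CPT hardware.
 */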
static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct pci_dev *pdev;
int status, cpu_num;
/* Clear control words */
rctx->ctrl_word.flags = 0;
rctx->fctx.enc.enc_ctrl.u = 0;
req_info->callback = otx2_cpt_aead_callback;
req_info->areq = &req->base;
req_info->req_type = reg_type;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
switch (reg_type) {
case OTX2_CPT_AEAD_ENC_DEC_REQ:
status = create_aead_input_list(req, enc);
if (status)
return status;
create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
break;
case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
create_aead_null_input_list(req, enc,
crypto_aead_authsize(tfm));
status = create_aead_null_output_list(req, enc,
crypto_aead_authsize(tfm));
if (status)
return status;
break;
default:
return -EINVAL;
}
if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
return -EINVAL;
if (!req_info->req.param2 ||
(req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
(req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
return aead_do_fallback(req, enc);
status = get_se_device(&pdev, &cpu_num);
if (status)
return status;
req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
/*
 * The send is asynchronous; once the request is
 * completed the driver notifies us through the
 * registered callback function.
 */
return otx2_cpt_do_request(pdev, req_info, cpu_num);
}
static int otx2_cpt_aead_encrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
}
static int otx2_cpt_aead_decrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
}
static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}
static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}
static struct skcipher_alg otx2_cpt_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "cpt_xts_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx2_cpt_enc_dec_init,
.exit = otx2_cpt_skcipher_exit,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.setkey = otx2_cpt_skcipher_xts_setkey,
.encrypt = otx2_cpt_skcipher_encrypt,
.decrypt = otx2_cpt_skcipher_decrypt,
}, {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cpt_cbc_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx2_cpt_enc_dec_init,
.exit = otx2_cpt_skcipher_exit,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
.encrypt = otx2_cpt_skcipher_encrypt,
.decrypt = otx2_cpt_skcipher_decrypt,
}, {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "cpt_ecb_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx2_cpt_enc_dec_init,
.exit = otx2_cpt_skcipher_exit,
.ivsize = 0,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
.encrypt = otx2_cpt_skcipher_encrypt,
.decrypt = otx2_cpt_skcipher_decrypt,
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cpt_cbc_des3_ede",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx2_cpt_enc_dec_init,
.exit = otx2_cpt_skcipher_exit,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
.encrypt = otx2_cpt_skcipher_encrypt,
.decrypt = otx2_cpt_skcipher_decrypt,
}, {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "cpt_ecb_des3_ede",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx2_cpt_enc_dec_init,
.exit = otx2_cpt_skcipher_exit,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = 0,
.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
.encrypt = otx2_cpt_skcipher_encrypt,
.decrypt = otx2_cpt_skcipher_decrypt,
} };
static struct aead_alg otx2_cpt_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_cbc_aes_sha1_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx2_cpt_aead_set_authsize,
.encrypt = otx2_cpt_aead_encrypt,
.decrypt = otx2_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_cbc_aes_sha256_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx2_cpt_aead_set_authsize,
.encrypt = otx2_cpt_aead_encrypt,
.decrypt = otx2_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_cbc_aes_sha384_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx2_cpt_aead_set_authsize,
.encrypt = otx2_cpt_aead_encrypt,
.decrypt = otx2_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_cbc_aes_sha512_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx2_cpt_aead_set_authsize,
.encrypt = otx2_cpt_aead_encrypt,
.decrypt = otx2_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha1_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_ecb_null_sha1_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx2_cpt_aead_null_set_authsize,
.encrypt = otx2_cpt_aead_null_encrypt,
.decrypt = otx2_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA1_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha256_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_ecb_null_sha256_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx2_cpt_aead_null_set_authsize,
.encrypt = otx2_cpt_aead_null_encrypt,
.decrypt = otx2_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA256_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha384_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_ecb_null_sha384_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx2_cpt_aead_null_set_authsize,
.encrypt = otx2_cpt_aead_null_encrypt,
.decrypt = otx2_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA384_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha512_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_ecb_null_sha512_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx2_cpt_aead_null_set_authsize,
.encrypt = otx2_cpt_aead_null_encrypt,
.decrypt = otx2_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA512_DIGEST_SIZE,
}, {
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "cpt_rfc4106_gcm_aes",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx2_cpt_aead_gcm_aes_init,
.exit = otx2_cpt_aead_exit,
.setkey = otx2_cpt_aead_gcm_aes_setkey,
.setauthsize = otx2_cpt_aead_gcm_set_authsize,
.encrypt = otx2_cpt_aead_encrypt,
.decrypt = otx2_cpt_aead_decrypt,
.ivsize = AES_GCM_IV_SIZE,
.maxauthsize = AES_GCM_ICV_SIZE,
} };
static inline int cpt_register_algs(void)
{
int i, err = 0;
for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_register_skciphers(otx2_cpt_skciphers,
ARRAY_SIZE(otx2_cpt_skciphers));
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_register_aeads(otx2_cpt_aeads,
ARRAY_SIZE(otx2_cpt_aeads));
if (err) {
crypto_unregister_skciphers(otx2_cpt_skciphers,
ARRAY_SIZE(otx2_cpt_skciphers));
return err;
}
return 0;
}
static inline void cpt_unregister_algs(void)
{
crypto_unregister_skciphers(otx2_cpt_skciphers,
ARRAY_SIZE(otx2_cpt_skciphers));
crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
}
static int compare_func(const void *lptr, const void *rptr)
{
const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
if (ldesc->dev->devfn < rdesc->dev->devfn)
return -1;
if (ldesc->dev->devfn > rdesc->dev->devfn)
return 1;
return 0;
}
static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = lptr;
struct cpt_device_desc *rdesc = rptr;
swap(*ldesc, *rdesc);
}
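/*
 * Add a CPT device to the SE device table and, once all expected
 * devices have been probed, register the crypto algorithms.
 */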
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices)
{
int ret = 0;
int count;
mutex_lock(&mutex);
count = atomic_read(&se_devices.count);
if (count >= OTX2_CPT_MAX_LFS_NUM) {
dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto unlock;
}
se_devices.desc[count].num_queues = num_queues;
se_devices.desc[count++].dev = pdev;
atomic_inc(&se_devices.count);
if (atomic_read(&se_devices.count) == num_devices &&
is_crypto_registered == false) {
if (cpt_register_algs()) {
dev_err(&pdev->dev,
"Error in registering crypto algorithms\n");
ret = -EINVAL;
goto unlock;
}
try_module_get(mod);
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
compare_func, swap_func);
unlock:
mutex_unlock(&mutex);
return ret;
}
void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
{
struct cpt_device_table *dev_tbl;
bool dev_found = false;
int i, j, count;
mutex_lock(&mutex);
dev_tbl = &se_devices;
count = atomic_read(&dev_tbl->count);
for (i = 0; i < count; i++) {
if (pdev == dev_tbl->desc[i].dev) {
for (j = i; j < count-1; j++)
dev_tbl->desc[j] = dev_tbl->desc[j+1];
dev_found = true;
break;
}
}
if (!dev_found) {
dev_err(&pdev->dev, "%s device not found\n", __func__);
goto unlock;
}
if (atomic_dec_and_test(&se_devices.count)) {
cpt_unregister_algs();
module_put(mod);
is_crypto_registered = false;
}
unlock:
mutex_unlock(&mutex);
}
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Marvell. */
#include "otx2_cpt_devlink.h"
static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
return otx2_cpt_dl_custom_egrp_create(cptpf, ctx);
}
static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
return otx2_cpt_dl_custom_egrp_delete(cptpf, ctx);
}
static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
otx2_cpt_print_uc_dbg_info(cptpf);
return 0;
}
enum otx2_cpt_dl_param_id {
OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
};
static const struct devlink_param otx2_cpt_dl_params[] = {
DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
"egrp_create", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_create,
NULL),
DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
"egrp_delete", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete,
NULL),
};
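/*
 * Report the running microcode version for the given engine type: scan
 * the engine groups and use the version string of the first group that
 * contains an engine of that type.
 */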
static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req,
struct otx2_cpt_eng_grp_info grp[],
const char *ver_name, int eng_type)
{
struct otx2_cpt_engs_rsvd *eng;
int i;
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
eng = find_engines_by_type(&grp[i], eng_type);
if (eng)
return devlink_info_version_running_put(req, ver_name,
eng->ucode->ver_str);
}
return 0;
}
static int otx2_cpt_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
int err;
err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
"fw.ae", OTX2_CPT_AE_TYPES);
if (err)
return err;
err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
"fw.se", OTX2_CPT_SE_TYPES);
if (err)
return err;
return otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
"fw.ie", OTX2_CPT_IE_TYPES);
}
static const struct devlink_ops otx2_cpt_devlink_ops = {
.info_get = otx2_cpt_devlink_info_get,
};
int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf)
{
struct device *dev = &cptpf->pdev->dev;
struct otx2_cpt_devlink *cpt_dl;
struct devlink *dl;
int ret;
dl = devlink_alloc(&otx2_cpt_devlink_ops,
sizeof(struct otx2_cpt_devlink), dev);
if (!dl) {
dev_warn(dev, "devlink_alloc failed\n");
return -ENOMEM;
}
cpt_dl = devlink_priv(dl);
cpt_dl->dl = dl;
cpt_dl->cptpf = cptpf;
cptpf->dl = dl;
ret = devlink_params_register(dl, otx2_cpt_dl_params,
ARRAY_SIZE(otx2_cpt_dl_params));
if (ret) {
dev_err(dev, "devlink params register failed with error %d",
ret);
devlink_free(dl);
return ret;
}
devlink_register(dl);
return 0;
}
void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf)
{
struct devlink *dl = cptpf->dl;
if (!dl)
return;
devlink_unregister(dl);
devlink_params_unregister(dl, otx2_cpt_dl_params,
ARRAY_SIZE(otx2_cpt_dl_params));
devlink_free(dl);
}
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"
#define OTX2_CPT_DRV_NAME "rvu_cptpf"
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
#define CPT_UC_RID_CN9K_B0 1
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
int ena_bits;
/* Clear any pending interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
/* Enable VF interrupts for VFs from 0 to 63 */
ena_bits = ((num_vfs - 1) % 64);
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
GENMASK_ULL(ena_bits, 0));
if (num_vfs > 64) {
/* Enable VF interrupts for VFs from 64 to 127 */
ena_bits = num_vfs - 64 - 1;
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
GENMASK_ULL(ena_bits, 0));
}
}
static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
int vector;
/* Disable VF-PF interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
/* Clear any pending interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);
vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
free_irq(vector, cptpf);
if (num_vfs > 64) {
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
free_irq(vector, cptpf);
}
}
static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
/* Clear FLR interrupt if any */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
INTR_MASK(num_vfs));
/* Enable VF FLR interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
/* Clear ME interrupt if any */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
INTR_MASK(num_vfs));
/* Enable VF ME interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
if (num_vfs <= 64)
return;
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
INTR_MASK(num_vfs - 64));
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
INTR_MASK(num_vfs - 64));
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}
static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
int vector;
/* Disable VF FLR interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
free_irq(vector, cptpf);
/* Disable VF ME interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
free_irq(vector, cptpf);
if (num_vfs <= 64)
return;
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
free_irq(vector, cptpf);
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
free_irq(vector, cptpf);
}
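/*
 * VF FLR work handler: notify AF about the FLR for this VF and, once
 * the mailbox exchange completes, clear the transaction pending bit and
 * re-enable the per-VF FLR interrupt.
 */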
static void cptpf_flr_wq_handler(struct work_struct *work)
{
struct cptpf_flr_work *flr_work;
struct otx2_cptpf_dev *pf;
struct mbox_msghdr *req;
struct otx2_mbox *mbox;
int vf, reg = 0;
flr_work = container_of(work, struct cptpf_flr_work, work);
pf = flr_work->pf;
mbox = &pf->afpf_mbox;
vf = flr_work - pf->flr_work;
mutex_lock(&pf->lock);
req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msg_rsp));
if (!req) {
mutex_unlock(&pf->lock);
return;
}
req->sig = OTX2_MBOX_REQ_SIG;
req->id = MBOX_MSG_VF_FLR;
req->pcifunc &= RVU_PFVF_FUNC_MASK;
req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
otx2_cpt_send_mbox_msg(mbox, pf->pdev);
if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
if (vf >= 64) {
reg = 1;
vf = vf - 64;
}
/* Clear transaction pending register */
otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
mutex_unlock(&pf->lock);
}
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
int reg, dev, vf, start_vf, num_reg = 1;
struct otx2_cptpf_dev *cptpf = arg;
u64 intr;
if (cptpf->max_vfs > 64)
num_reg = 2;
for (reg = 0; reg < num_reg; reg++) {
intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INTX(reg));
if (!intr)
continue;
start_vf = 64 * reg;
for (vf = 0; vf < 64; vf++) {
if (!(intr & BIT_ULL(vf)))
continue;
dev = vf + start_vf;
queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
/* Clear interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
/* Disable the interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFFLR_INT_ENA_W1CX(reg),
BIT_ULL(vf));
}
}
return IRQ_HANDLED;
}
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
struct otx2_cptpf_dev *cptpf = arg;
int reg, vf, num_reg = 1;
u64 intr;
if (cptpf->max_vfs > 64)
num_reg = 2;
for (reg = 0; reg < num_reg; reg++) {
intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFME_INTX(reg));
if (!intr)
continue;
for (vf = 0; vf < 64; vf++) {
if (!(intr & BIT_ULL(vf)))
continue;
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
/* Clear interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
}
}
return IRQ_HANDLED;
}
static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}
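/*
 * Register the VF-PF mailbox, VF FLR and VF ME interrupt handlers and
 * enable the corresponding interrupts.
 */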
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
struct pci_dev *pdev = cptpf->pdev;
struct device *dev = &pdev->dev;
int ret, vector;
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
/* Register VF-PF mailbox interrupt handler */
ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for PFVF mbox0 irq\n");
return ret;
}
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
/* Register VF FLR interrupt handler */
ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for VFFLR0 irq\n");
goto free_mbox0_irq;
}
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
/* Register VF ME interrupt handler */
ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for PFVF mbox0 irq\n");
goto free_flr0_irq;
}
if (num_vfs > 64) {
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
"CPTVFPF Mbox1", cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for PFVF mbox1 irq\n");
goto free_me0_irq;
}
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
/* Register VF FLR interrupt handler */
ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for VFFLR1 irq\n");
goto free_mbox1_irq;
}
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
/* Register VF ME interrupt handler */
ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for VFFLR1 irq\n");
goto free_flr1_irq;
}
}
cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);
return 0;
free_flr1_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
free_irq(vector, cptpf);
free_mbox1_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
free_irq(vector, cptpf);
free_me0_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
free_irq(vector, cptpf);
free_flr0_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
free_irq(vector, cptpf);
free_mbox0_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
free_irq(vector, cptpf);
return ret;
}
static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
if (!pf->flr_wq)
return;
destroy_workqueue(pf->flr_wq);
pf->flr_wq = NULL;
kfree(pf->flr_work);
}
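/* Allocate the ordered FLR workqueue and one FLR work item per VF. */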
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
int vf;
cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
if (!cptpf->flr_wq)
return -ENOMEM;
cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
GFP_KERNEL);
if (!cptpf->flr_work)
goto destroy_wq;
for (vf = 0; vf < num_vfs; vf++) {
cptpf->flr_work[vf].pf = cptpf;
INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
}
return 0;
destroy_wq:
destroy_workqueue(cptpf->flr_wq);
return -ENOMEM;
}
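/*
 * Set up the VF-PF mailbox: map the mailbox region advertised by the AF
 * and initialize the per-VF mailbox work items.
 */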
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
struct device *dev = &cptpf->pdev->dev;
u64 vfpf_mbox_base;
int err, i;
cptpf->vfpf_mbox_wq =
alloc_ordered_workqueue("cpt_vfpf_mailbox",
WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!cptpf->vfpf_mbox_wq)
return -ENOMEM;
/* Map VF-PF mailbox memory */
if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
else
vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
if (!vfpf_mbox_base) {
dev_err(dev, "VF-PF mailbox address not configured\n");
err = -ENOMEM;
goto free_wqe;
}
cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
MBOX_SIZE * cptpf->max_vfs);
if (!cptpf->vfpf_mbox_base) {
dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
err = -ENOMEM;
goto free_wqe;
}
err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
num_vfs);
if (err)
goto free_wqe;
for (i = 0; i < num_vfs; i++) {
cptpf->vf[i].vf_id = i;
cptpf->vf[i].cptpf = cptpf;
cptpf->vf[i].intr_idx = i % 64;
INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
otx2_cptpf_vfpf_mbox_handler);
}
return 0;
free_wqe:
destroy_workqueue(cptpf->vfpf_mbox_wq);
return err;
}
static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
destroy_workqueue(cptpf->vfpf_mbox_wq);
otx2_mbox_destroy(&cptpf->vfpf_mbox);
}
static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
/* Disable AF-PF interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
0x1ULL);
/* Clear interrupt if any */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
struct device *dev = &pdev->dev;
int ret, irq;
irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
/* Register AF-PF mailbox interrupt handler */
ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
"CPTAFPF Mbox", cptpf);
if (ret) {
dev_err(dev,
"IRQ registration failed for PFAF mbox irq\n");
return ret;
}
/* Clear interrupt if any, to avoid spurious interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
/* Enable AF-PF interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
0x1ULL);
ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
if (ret) {
dev_warn(dev,
"AF not responding to mailbox, deferring probe\n");
cptpf_disable_afpf_mbox_intr(cptpf);
return -EPROBE_DEFER;
}
return 0;
}
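/*
 * Set up the AF-PF mailbox: map the mailbox BAR region and initialize
 * both the request (PF->AF) and up-message (AF->PF) mailbox directions.
 */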
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
resource_size_t offset;
int err;
cptpf->afpf_mbox_wq =
alloc_ordered_workqueue("cpt_afpf_mailbox",
WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!cptpf->afpf_mbox_wq)
return -ENOMEM;
offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
/* Map AF-PF mailbox memory */
cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
if (!cptpf->afpf_mbox_base) {
dev_err(&pdev->dev, "Unable to map BAR4\n");
err = -ENOMEM;
goto error;
}
err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
if (err)
goto error;
err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
if (err)
goto mbox_cleanup;
INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
mutex_init(&cptpf->lock);
return 0;
mbox_cleanup:
otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
destroy_workqueue(cptpf->afpf_mbox_wq);
return err;
}
static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
destroy_workqueue(cptpf->afpf_mbox_wq);
otx2_mbox_destroy(&cptpf->afpf_mbox);
otx2_mbox_destroy(&cptpf->afpf_mbox_up);
}
static ssize_t sso_pf_func_ovrd_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}
static ssize_t sso_pf_func_ovrd_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
u8 sso_pf_func_ovrd;
if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
return count;
if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
return -EINVAL;
cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;
return count;
}
static ssize_t kvf_limits_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", cptpf->kvf_limits);
}
static ssize_t kvf_limits_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
int lfs_num;
int ret;
ret = kstrtoint(buf, 0, &lfs_num);
if (ret)
return ret;
if (lfs_num < 1 || lfs_num > num_online_cpus()) {
dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
lfs_num, num_online_cpus());
return -EINVAL;
}
cptpf->kvf_limits = lfs_num;
return count;
}
static DEVICE_ATTR_RW(kvf_limits);
static DEVICE_ATTR_RW(sso_pf_func_ovrd);
static struct attribute *cptpf_attrs[] = {
&dev_attr_kvf_limits.attr,
&dev_attr_sso_pf_func_ovrd.attr,
NULL
};
static const struct attribute_group cptpf_sysfs_group = {
.attrs = cptpf_attrs,
};
static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
u64 rev;
rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
rev = (rev >> 12) & 0xFF;
/*
* Check if AF has setup revision for RVUM block, otherwise
* driver probe should be deferred until AF driver comes up
*/
if (!rev) {
dev_warn(&cptpf->pdev->dev,
"AF is not initialized, deferring probe\n");
return -EPROBE_DEFER;
}
return 0;
}
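/* Reset one CPT block via CPT_AF_BLK_RST and poll until the busy bit clears. */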
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
int timeout = 10, ret;
u64 reg = 0;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_BLK_RST, 0x1, blkaddr);
if (ret)
return ret;
do {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_BLK_RST, &reg, blkaddr);
if (ret)
return ret;
if (!((reg >> 63) & 0x1))
break;
usleep_range(10000, 20000);
if (timeout-- < 0)
return -EBUSY;
} while (1);
return ret;
}
static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
int ret = 0;
if (cptpf->has_cpt1) {
ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_device_reset(cptpf, BLKADDR_CPT0);
}
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
u64 cfg;
cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
if (cfg & BIT_ULL(11))
cptpf->has_cpt1 = true;
}
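/*
 * One-time PF device init: detect CPT1, reset the CPT block(s), read the
 * SE/IE/AE engine counts from CPT_AF_CONSTANTS1 and disable all cores.
 */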
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
union otx2_cptx_af_constants1 af_cnsts1 = {0};
int ret = 0;
/* check if 'implemented' bit is set for block BLKADDR_CPT1 */
cptpf_check_block_implemented(cptpf);
/* Reset the CPT PF device */
ret = cptpf_device_reset(cptpf);
if (ret)
return ret;
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_CONSTANTS1, &af_cnsts1.u,
BLKADDR_CPT0);
if (ret)
return ret;
cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
/* Disable all cores */
ret = otx2_cpt_disable_all_cores(cptpf);
return ret;
}
static int cptpf_sriov_disable(struct pci_dev *pdev)
{
struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
int num_vfs = pci_num_vf(pdev);
if (!num_vfs)
return 0;
pci_disable_sriov(pdev);
cptpf_unregister_vfpf_intr(cptpf, num_vfs);
cptpf_flr_wq_destroy(cptpf);
cptpf_vfpf_mbox_destroy(cptpf);
module_put(THIS_MODULE);
cptpf->enabled_vfs = 0;
return 0;
}
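/*
 * Enable SR-IOV: bring up the VF-PF mailbox, FLR workqueue and VF
 * interrupts, discover engine capabilities, create the default engine
 * groups and finally enable the VFs at the PCI level.
 */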
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
int ret;
/* Initialize VF<=>PF mailbox */
ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
if (ret)
return ret;
ret = cptpf_flr_wq_init(cptpf, num_vfs);
if (ret)
goto destroy_mbox;
/* Register VF<=>PF mailbox interrupt */
ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
if (ret)
goto destroy_flr;
/* Get CPT HW capabilities using LOAD_FVC operation. */
ret = otx2_cpt_discover_eng_capabilities(cptpf);
if (ret)
goto disable_intr;
ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
if (ret)
goto disable_intr;
cptpf->enabled_vfs = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret)
goto disable_intr;
dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
try_module_get(THIS_MODULE);
return num_vfs;
disable_intr:
cptpf_unregister_vfpf_intr(cptpf, num_vfs);
cptpf->enabled_vfs = 0;
destroy_flr:
cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
cptpf_vfpf_mbox_destroy(cptpf);
return ret;
}
static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs > 0) {
return cptpf_sriov_enable(pdev, num_vfs);
} else {
return cptpf_sriov_disable(pdev);
}
}
static int otx2_cptpf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct otx2_cptpf_dev *cptpf;
int err;
cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
if (!cptpf)
return -ENOMEM;
err = pcim_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
goto clear_drvdata;
}
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
/* Map PF's configuration registers */
err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
OTX2_CPT_DRV_NAME);
if (err) {
dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
goto clear_drvdata;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, cptpf);
cptpf->pdev = pdev;
cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
/* Check if AF driver is up, otherwise defer probe */
err = cpt_is_pf_usable(cptpf);
if (err)
goto clear_drvdata;
err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "Request for %d msix vectors failed\n",
RVU_PF_INT_VEC_CNT);
goto clear_drvdata;
}
otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
/* Initialize AF-PF mailbox */
err = cptpf_afpf_mbox_init(cptpf);
if (err)
goto clear_drvdata;
/* Register mailbox interrupt */
err = cptpf_register_afpf_mbox_intr(cptpf);
if (err)
goto destroy_afpf_mbox;
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize CPT PF device */
err = cptpf_device_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize engine groups */
err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
if (err)
goto unregister_intr;
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
goto cleanup_eng_grps;
err = otx2_cpt_register_dl(cptpf);
if (err)
goto sysfs_grp_del;
return 0;
sysfs_grp_del:
sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
pci_set_drvdata(pdev, NULL);
return err;
}
static void otx2_cptpf_remove(struct pci_dev *pdev)
{
struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
if (!cptpf)
return;
cptpf_sriov_disable(pdev);
otx2_cpt_unregister_dl(cptpf);
/* Delete sysfs entry created for kernel VF limits */
sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
/* Cleanup engine groups */
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
/* Disable AF-PF mailbox interrupt */
cptpf_disable_afpf_mbox_intr(cptpf);
/* Destroy AF-PF mbox */
cptpf_afpf_mbox_destroy(cptpf);
pci_set_drvdata(pdev, NULL);
}
/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
{ 0, } /* end of table */
};
static struct pci_driver otx2_cpt_pci_driver = {
.name = OTX2_CPT_DRV_NAME,
.id_table = otx2_cpt_id_table,
.probe = otx2_cptpf_probe,
.remove = otx2_cptpf_remove,
.sriov_configure = otx2_cptpf_sriov_configure
};
module_pci_driver(otx2_cpt_pci_driver);
MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
int ret;
otx2_mbox_msg_send(mbox, 0);
ret = otx2_mbox_wait_for_rsp(mbox, 0);
if (ret == -EIO) {
dev_err(&pdev->dev, "RVU MBOX timeout.\n");
return ret;
} else if (ret) {
dev_err(&pdev->dev, "RVU MBOX error: %d.\n", ret);
return -EFAULT;
}
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
struct mbox_msghdr *req;
req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct ready_msg_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->id = MBOX_MSG_READY;
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = 0;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
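/* Queue (but do not send) an AF register read request on the AF-PF mailbox. */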
static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
struct pci_dev *pdev, u64 reg,
u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
reg_msg = (struct cpt_rd_wr_reg_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
sizeof(*reg_msg));
if (reg_msg == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
reg_msg->hdr.pcifunc = 0;
reg_msg->is_write = 0;
reg_msg->reg_offset = reg;
reg_msg->ret_val = val;
reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
reg_msg = (struct cpt_rd_wr_reg_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
sizeof(*reg_msg));
if (reg_msg == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
reg_msg->hdr.pcifunc = 0;
reg_msg->is_write = 1;
reg_msg->reg_offset = reg;
reg_msg->val = val;
reg_msg->blkaddr = blkaddr;
return 0;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr)
{
int ret;
ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
{
int ret;
ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
{
struct otx2_mbox *mbox = lfs->mbox;
struct rsrc_attach *req;
int ret;
req = (struct rsrc_attach *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msg_rsp));
if (req == NULL) {
dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_ATTACH_RESOURCES;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = 0;
req->cptlfs = lfs->lfs_num;
req->cpt_blkaddr = lfs->blkaddr;
req->modify = 1;
ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
if (ret)
return ret;
if (!lfs->are_lfs_attached)
ret = -EINVAL;
return ret;
}
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
{
struct otx2_mbox *mbox = lfs->mbox;
struct rsrc_detach *req;
int ret;
req = (struct rsrc_detach *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msg_rsp));
if (req == NULL) {
dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = 0;
req->cptlfs = 1;
ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
if (ret)
return ret;
if (lfs->are_lfs_attached)
ret = -EINVAL;
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
{
struct otx2_mbox *mbox = lfs->mbox;
struct pci_dev *pdev = lfs->pdev;
struct mbox_msghdr *req;
int ret, i;
req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msix_offset_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->id = MBOX_MSG_MSIX_OFFSET;
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = 0;
ret = otx2_cpt_send_mbox_msg(mbox, pdev);
if (ret)
return ret;
for (i = 0; i < lfs->lfs_num; i++) {
if (lfs->lf[i].msix_offset == MSIX_VECTOR_INVALID) {
dev_err(&pdev->dev,
"Invalid msix offset %d for LF %d\n",
lfs->lf[i].msix_offset, i);
return -EINVAL;
}
}
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
{
int err;
if (!otx2_mbox_nonempty(mbox, 0))
return 0;
otx2_mbox_msg_send(mbox, 0);
err = otx2_mbox_wait_for_rsp(mbox, 0);
if (err)
return err;
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"
#define CSR_DELAY 30
#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08
#define CTX_FLUSH_TIMER_CNT 0xFFFFFF
struct fw_info_t {
struct list_head ucodes;
};
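/* Build a bitmap of all engine cores reserved by the given engine group. */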
static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
struct otx2_cpt_eng_grp_info *eng_grp)
{
struct otx2_cpt_bitmap bmap = { {0} };
bool found = false;
int i;
if (eng_grp->g->engs_num < 0 ||
eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx2\n",
eng_grp->g->engs_num);
return bmap;
}
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
if (eng_grp->engs[i].type) {
bitmap_or(bmap.bits, bmap.bits,
eng_grp->engs[i].bmap,
eng_grp->g->engs_num);
bmap.size = eng_grp->g->engs_num;
found = true;
}
}
if (!found)
dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx);
return bmap;
}
static int is_eng_type(int val, int eng_type)
{
return val & (1 << eng_type);
}
static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
if (eng_grp->ucode[1].type)
return true;
else
return false;
}
static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
const char *filename)
{
strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
{
char *str = "unknown";
switch (eng_type) {
case OTX2_CPT_SE_TYPES:
str = "SE";
break;
case OTX2_CPT_IE_TYPES:
str = "IE";
break;
case OTX2_CPT_AE_TYPES:
str = "AE";
break;
}
return str;
}
static char *get_ucode_type_str(int ucode_type)
{
char *str = "unknown";
switch (ucode_type) {
case (1 << OTX2_CPT_SE_TYPES):
str = "SE";
break;
case (1 << OTX2_CPT_IE_TYPES):
str = "IE";
break;
case (1 << OTX2_CPT_AE_TYPES):
str = "AE";
break;
case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
str = "SE+IPSEC";
break;
}
return str;
}
static int get_ucode_type(struct device *dev,
struct otx2_cpt_ucode_hdr *ucode_hdr,
int *ucode_type)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
struct pci_dev *pdev = cptpf->pdev;
int i, val = 0;
u8 nn;
strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
return -EINVAL;
nn = ucode_hdr->ver_num.nn;
if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
(nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
nn == OTX2_CPT_SE_UC_TYPE3))
val |= 1 << OTX2_CPT_SE_TYPES;
if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
(nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
nn == OTX2_CPT_IE_UC_TYPE3))
val |= 1 << OTX2_CPT_IE_TYPES;
if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
nn == OTX2_CPT_AE_UC_TYPE)
val |= 1 << OTX2_CPT_AE_TYPES;
*ucode_type = val;
if (!val)
return -EINVAL;
return 0;
}
static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
dma_addr_t dma_addr, int blkaddr)
{
return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_UCODE_BASE(eng),
(u64)dma_addr, blkaddr);
}
static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
struct otx2_cptpf_dev *cptpf, int blkaddr)
{
struct otx2_cpt_engs_rsvd *engs;
dma_addr_t dma_addr;
int i, bit, ret;
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
if (ret)
return ret;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
dma_addr = engs->ucode->dma;
/*
* Set UCODE_BASE only for the cores which are not used,
* other cores should have already valid UCODE_BASE set
*/
for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
if (!eng_grp->g->eng_ref_cnt[bit]) {
ret = __write_ucode_base(cptpf, bit, dma_addr,
blkaddr);
if (ret)
return ret;
}
}
return 0;
}
static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
int ret;
if (cptpf->has_cpt1) {
ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_bitmap bmap,
int blkaddr)
{
int i, timeout = 10;
int busy, ret;
u64 reg = 0;
/* Detach the cores from group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
if (reg & (1ull << eng_grp->idx)) {
eng_grp->g->eng_ref_cnt[i]--;
reg &= ~(1ull << eng_grp->idx);
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL2(i), reg,
blkaddr);
if (ret)
return ret;
}
}
/* Wait for cores to become idle */
do {
busy = 0;
usleep_range(10000, 20000);
if (timeout-- < 0)
return -EBUSY;
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_STS(i), &reg,
blkaddr);
if (ret)
return ret;
if (reg & 0x1) {
busy = 1;
break;
}
}
} while (busy);
/* Disable the cores only if they are not used anymore */
for_each_set_bit(i, bmap.bits, bmap.size) {
if (!eng_grp->g->eng_ref_cnt[i]) {
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x0,
blkaddr);
if (ret)
return ret;
}
}
return 0;
}
static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
if (cptpf->has_cpt1) {
ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT0);
}
static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_bitmap bmap,
int blkaddr)
{
u64 reg = 0;
int i, ret;
/* Attach the cores to the group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
if (!(reg & (1ull << eng_grp->idx))) {
eng_grp->g->eng_ref_cnt[i]++;
reg |= 1ull << eng_grp->idx;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL2(i), reg,
blkaddr);
if (ret)
return ret;
}
}
/* Enable the cores */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x1,
blkaddr);
if (ret)
return ret;
}
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
if (cptpf->has_cpt1) {
ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}
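/*
 * Request a microcode image from the firmware loader, validate its header
 * and add it to the list of available microcodes.
 */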
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
char *filename)
{
struct otx2_cpt_ucode_hdr *ucode_hdr;
struct otx2_cpt_uc_info_t *uc_info;
int ucode_type, ucode_size;
int ret;
uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
if (!uc_info)
return -ENOMEM;
ret = request_firmware(&uc_info->fw, filename, dev);
if (ret)
goto free_uc_info;
ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
if (ret)
goto release_fw;
ucode_size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode_size) {
dev_err(dev, "Ucode %s invalid size\n", filename);
ret = -EINVAL;
goto release_fw;
}
set_ucode_filename(&uc_info->ucode, filename);
memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
OTX2_CPT_UCODE_VER_STR_SZ);
uc_info->ucode.ver_num = ucode_hdr->ver_num;
uc_info->ucode.type = ucode_type;
uc_info->ucode.size = ucode_size;
list_add_tail(&uc_info->list, &fw_info->ucodes);
return 0;
release_fw:
release_firmware(uc_info->fw);
free_uc_info:
kfree(uc_info);
return ret;
}
static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
struct otx2_cpt_uc_info_t *curr, *temp;
if (!fw_info)
return;
list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
list_del(&curr->list);
release_firmware(curr->fw);
kfree(curr);
}
}
static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
int ucode_type)
{
struct otx2_cpt_uc_info_t *curr;
list_for_each_entry(curr, &fw_info->ucodes, list) {
if (!is_eng_type(curr->ucode.type, ucode_type))
continue;
return curr;
}
return NULL;
}
static void print_uc_info(struct fw_info_t *fw_info)
{
struct otx2_cpt_uc_info_t *curr;
list_for_each_entry(curr, &fw_info->ucodes, list) {
pr_debug("Ucode filename %s\n", curr->ucode.filename);
pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
pr_debug("Ucode version %d.%d.%d.%d\n",
curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
get_ucode_type_str(curr->ucode.type));
pr_debug("Ucode size %d\n", curr->ucode.size);
pr_debug("Ucode ptr %p\n", curr->fw->data);
}
}
static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
{
char filename[OTX2_CPT_NAME_LENGTH];
char eng_type[8] = {0};
int ret, e, i;
INIT_LIST_HEAD(&fw_info->ucodes);
for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
strcpy(eng_type, get_eng_type_str(e));
for (i = 0; i < strlen(eng_type); i++)
eng_type[i] = tolower(eng_type[i]);
snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
pdev->revision, eng_type);
/* Request firmware for each engine type */
ret = load_fw(&pdev->dev, fw_info, filename);
if (ret)
goto release_fw;
}
print_uc_info(fw_info);
return 0;
release_fw:
cpt_ucode_release_fw(fw_info);
return ret;
}
struct otx2_cpt_engs_rsvd *find_engines_by_type(
struct otx2_cpt_eng_grp_info *eng_grp,
int eng_type)
{
int i;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!eng_grp->engs[i].type)
continue;
if (eng_grp->engs[i].type == eng_type)
return &eng_grp->engs[i];
}
return NULL;
}
static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
int eng_type)
{
struct otx2_cpt_engs_rsvd *engs;
engs = find_engines_by_type(eng_grp, eng_type);
return (engs != NULL ? 1 : 0);
}
static int update_engines_avail_count(struct device *dev,
struct otx2_cpt_engs_available *avail,
struct otx2_cpt_engs_rsvd *engs, int val)
{
switch (engs->type) {
case OTX2_CPT_SE_TYPES:
avail->se_cnt += val;
break;
case OTX2_CPT_IE_TYPES:
avail->ie_cnt += val;
break;
case OTX2_CPT_AE_TYPES:
avail->ae_cnt += val;
break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
return 0;
}
static int update_engines_offset(struct device *dev,
struct otx2_cpt_engs_available *avail,
struct otx2_cpt_engs_rsvd *engs)
{
switch (engs->type) {
case OTX2_CPT_SE_TYPES:
engs->offset = 0;
break;
case OTX2_CPT_IE_TYPES:
engs->offset = avail->max_se_cnt;
break;
case OTX2_CPT_AE_TYPES:
engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
return 0;
}
static int release_engines(struct device *dev,
struct otx2_cpt_eng_grp_info *grp)
{
int i, ret = 0;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!grp->engs[i].type)
continue;
if (grp->engs[i].count > 0) {
ret = update_engines_avail_count(dev, &grp->g->avail,
&grp->engs[i],
grp->engs[i].count);
if (ret)
return ret;
}
grp->engs[i].type = 0;
grp->engs[i].count = 0;
grp->engs[i].offset = 0;
grp->engs[i].ucode = NULL;
bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
}
return 0;
}
static int do_reserve_engines(struct device *dev,
struct otx2_cpt_eng_grp_info *grp,
struct otx2_cpt_engines *req_engs)
{
struct otx2_cpt_engs_rsvd *engs = NULL;
int i, ret;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!grp->engs[i].type) {
engs = &grp->engs[i];
break;
}
}
if (!engs)
return -ENOMEM;
engs->type = req_engs->type;
engs->count = req_engs->count;
ret = update_engines_offset(dev, &grp->g->avail, engs);
if (ret)
return ret;
if (engs->count > 0) {
ret = update_engines_avail_count(dev, &grp->g->avail, engs,
-engs->count);
if (ret)
return ret;
}
return 0;
}
static int check_engines_availability(struct device *dev,
struct otx2_cpt_eng_grp_info *grp,
struct otx2_cpt_engines *req_eng)
{
int avail_cnt = 0;
switch (req_eng->type) {
case OTX2_CPT_SE_TYPES:
avail_cnt = grp->g->avail.se_cnt;
break;
case OTX2_CPT_IE_TYPES:
avail_cnt = grp->g->avail.ie_cnt;
break;
case OTX2_CPT_AE_TYPES:
avail_cnt = grp->g->avail.ae_cnt;
break;
default:
dev_err(dev, "Invalid engine type %d\n", req_eng->type);
return -EINVAL;
}
if (avail_cnt < req_eng->count) {
dev_err(dev,
"Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count);
return -EBUSY;
}
return 0;
}
static int reserve_engines(struct device *dev,
struct otx2_cpt_eng_grp_info *grp,
struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
int i, ret = 0;
/* Validate if a number of requested engines are available */
for (i = 0; i < ucodes_cnt; i++) {
ret = check_engines_availability(dev, grp, &req_engs[i]);
if (ret)
return ret;
}
/* Reserve requested engines for this engine group */
for (i = 0; i < ucodes_cnt; i++) {
ret = do_reserve_engines(dev, grp, &req_engs[i]);
if (ret)
return ret;
}
return 0;
}
static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
if (ucode->va) {
dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
ucode->dma);
ucode->va = NULL;
ucode->dma = 0;
ucode->size = 0;
}
memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
set_ucode_filename(ucode, "");
ucode->type = 0;
}
static int copy_ucode_to_dma_mem(struct device *dev,
struct otx2_cpt_ucode *ucode,
const u8 *ucode_data)
{
u32 i;
/* Allocate DMAable space */
ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
GFP_KERNEL);
if (!ucode->va)
return -ENOMEM;
memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
ucode->size);
/* Byte swap 64-bit */
for (i = 0; i < (ucode->size / 8); i++)
cpu_to_be64s(&((u64 *)ucode->va)[i]);
/* Ucode needs 16-bit swap */
for (i = 0; i < (ucode->size / 2); i++)
cpu_to_be16s(&((u16 *)ucode->va)[i]);
return 0;
}
static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
{
int ret;
/* Point microcode to each core of the group */
ret = cpt_set_ucode_base(eng_grp, obj);
if (ret)
return ret;
/* Attach the cores to the group and enable them */
ret = cpt_attach_and_enable_cores(eng_grp, obj);
return ret;
}
static int disable_eng_grp(struct device *dev,
struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
{
int i, ret;
/* Disable all engines used by this group */
ret = cpt_detach_and_disable_cores(eng_grp, obj);
if (ret)
return ret;
/* Unload ucode used by this engine group */
ucode_unload(dev, &eng_grp->ucode[0]);
ucode_unload(dev, &eng_grp->ucode[1]);
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!eng_grp->engs[i].type)
continue;
eng_grp->engs[i].ucode = &eng_grp->ucode[0];
}
/* Clear UCODE_BASE register for each engine used by this group */
ret = cpt_set_ucode_base(eng_grp, obj);
return ret;
}
static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
struct otx2_cpt_eng_grp_info *src_grp)
{
/* Setup fields for engine group which is mirrored */
src_grp->mirror.is_ena = false;
src_grp->mirror.idx = 0;
src_grp->mirror.ref_count++;
/* Setup fields for mirroring engine group */
dst_grp->mirror.is_ena = true;
dst_grp->mirror.idx = src_grp->idx;
dst_grp->mirror.ref_count = 0;
}
static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
struct otx2_cpt_eng_grp_info *src_grp;
if (!dst_grp->mirror.is_ena)
return;
src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
src_grp->mirror.ref_count--;
dst_grp->mirror.is_ena = false;
dst_grp->mirror.idx = 0;
dst_grp->mirror.ref_count = 0;
}
static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
struct otx2_cpt_engines *engs, int engs_cnt)
{
struct otx2_cpt_engs_rsvd *mirrored_engs;
int i;
for (i = 0; i < engs_cnt; i++) {
mirrored_engs = find_engines_by_type(mirror_eng_grp,
engs[i].type);
if (!mirrored_engs)
continue;
/*
* If mirrored group has this type of engines attached then
* there are 3 scenarios possible:
* 1) mirrored_engs.count == engs[i].count then all engines
* from mirrored engine group will be shared with this engine
* group
* 2) mirrored_engs.count > engs[i].count then only a subset of
* engines from mirrored engine group will be shared with this
* engine group
* 3) mirrored_engs.count < engs[i].count then all engines
* from mirrored engine group will be shared with this group
* and additional engines will be reserved for exclusively use
* by this engine group
*/
engs[i].count -= mirrored_engs->count;
}
}
static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
struct otx2_cpt_eng_grp_info *grp)
{
struct otx2_cpt_eng_grps *eng_grps = grp->g;
int i;
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
if (!eng_grps->grp[i].is_enabled)
continue;
if (eng_grps->grp[i].ucode[0].type &&
eng_grps->grp[i].ucode[1].type)
continue;
if (grp->idx == i)
continue;
if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
grp->ucode[0].ver_str,
OTX2_CPT_UCODE_VER_STR_SZ))
return &eng_grps->grp[i];
}
return NULL;
}
static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
struct otx2_cpt_eng_grps *eng_grps)
{
int i;
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
if (!eng_grps->grp[i].is_enabled)
return &eng_grps->grp[i];
}
return NULL;
}
static int eng_grp_update_masks(struct device *dev,
struct otx2_cpt_eng_grp_info *eng_grp)
{
struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
struct otx2_cpt_bitmap tmp_bmap = { {0} };
int i, j, cnt, max_cnt;
int bit;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
if (engs->count <= 0)
continue;
switch (engs->type) {
case OTX2_CPT_SE_TYPES:
max_cnt = eng_grp->g->avail.max_se_cnt;
break;
case OTX2_CPT_IE_TYPES:
max_cnt = eng_grp->g->avail.max_ie_cnt;
break;
case OTX2_CPT_AE_TYPES:
max_cnt = eng_grp->g->avail.max_ae_cnt;
break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
cnt = engs->count;
WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
for (j = engs->offset; j < engs->offset + max_cnt; j++) {
if (!eng_grp->g->eng_ref_cnt[j]) {
bitmap_set(tmp_bmap.bits, j, 1);
cnt--;
if (!cnt)
break;
}
}
if (cnt)
return -ENOSPC;
bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
}
if (!eng_grp->mirror.is_ena)
return 0;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
mirrored_engs = find_engines_by_type(
&eng_grp->g->grp[eng_grp->mirror.idx],
engs->type);
WARN_ON(!mirrored_engs && engs->count <= 0);
if (!mirrored_engs)
continue;
bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
eng_grp->g->engs_num);
if (engs->count < 0) {
bit = find_first_bit(mirrored_engs->bmap,
eng_grp->g->engs_num);
bitmap_clear(tmp_bmap.bits, bit, -engs->count);
}
bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
eng_grp->g->engs_num);
}
return 0;
}
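/* Disable an engine group, drop its mirroring state and release its engines. */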
static int delete_engine_group(struct device *dev,
struct otx2_cpt_eng_grp_info *eng_grp)
{
int ret;
if (!eng_grp->is_enabled)
return 0;
if (eng_grp->mirror.ref_count)
return -EINVAL;
/* Removing engine group mirroring if enabled */
remove_eng_grp_mirroring(eng_grp);
/* Disable engine group */
ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
if (ret)
return ret;
/* Release all engines held by this engine group */
ret = release_engines(dev, eng_grp);
if (ret)
return ret;
eng_grp->is_enabled = false;
return 0;
}
static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
struct otx2_cpt_ucode *ucode;
if (eng_grp->mirror.is_ena)
ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
else
ucode = &eng_grp->ucode[0];
WARN_ON(!eng_grp->engs[0].type);
eng_grp->engs[0].ucode = ucode;
if (eng_grp->engs[1].type) {
if (is_2nd_ucode_used(eng_grp))
eng_grp->engs[1].ucode = &eng_grp->ucode[1];
else
eng_grp->engs[1].ucode = ucode;
}
}
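/*
 * Create an engine group: copy its microcode to DMA memory, mirror an
 * existing group with the same microcode where possible, reserve engine
 * cores and enable the group.
 */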
static int create_engine_group(struct device *dev,
struct otx2_cpt_eng_grps *eng_grps,
struct otx2_cpt_engines *engs, int ucodes_cnt,
void *ucode_data[], int is_print)
{
struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
struct otx2_cpt_eng_grp_info *eng_grp;
struct otx2_cpt_uc_info_t *uc_info;
int i, ret = 0;
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps);
if (!eng_grp) {
dev_err(dev, "Error all engine groups are being used\n");
return -ENOSPC;
}
/* Load ucode */
for (i = 0; i < ucodes_cnt; i++) {
uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
eng_grp->ucode[i] = uc_info->ucode;
ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
uc_info->fw->data);
if (ret)
goto unload_ucode;
}
/* Check if this group mirrors another existing engine group */
mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
if (mirrored_eng_grp) {
/* Setup mirroring */
setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
/*
* Update count of requested engines because some
* of them might be shared with mirrored group
*/
update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
}
ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
if (ret)
goto unload_ucode;
/* Update ucode pointers used by engines */
update_ucode_ptrs(eng_grp);
/* Update engine masks used by this group */
ret = eng_grp_update_masks(dev, eng_grp);
if (ret)
goto release_engs;
/* Enable engine group */
ret = enable_eng_grp(eng_grp, eng_grps->obj);
if (ret)
goto release_engs;
/*
* If this engine group mirrors another engine group
* then we need to unload ucode as we will use ucode
* from mirrored engine group
*/
if (eng_grp->mirror.is_ena)
ucode_unload(dev, &eng_grp->ucode[0]);
eng_grp->is_enabled = true;
if (!is_print)
return 0;
if (mirrored_eng_grp)
dev_info(dev,
"Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx);
else
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
if (is_2nd_ucode_used(eng_grp))
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[1].ver_str);
return 0;
release_engs:
release_engines(dev, eng_grp);
unload_ucode:
ucode_unload(dev, &eng_grp->ucode[0]);
ucode_unload(dev, &eng_grp->ucode[1]);
return ret;
}
static void delete_engine_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
int i;
/* First delete all mirroring engine groups */
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
if (eng_grps->grp[i].mirror.is_ena)
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
/* Delete remaining engine groups */
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}
#define PCI_DEVID_CN10K_RNM 0xA098
#define RNM_ENTROPY_STATUS 0x8
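/*
 * HW errata workaround: poll RNM_ENTROPY_STATUS until the RNM block
 * reports a normal entropy level before CPT random number requests are
 * enabled.
 */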
static void rnm_to_cpt_errata_fixup(struct device *dev)
{
struct pci_dev *pdev;
void __iomem *base;
int timeout = 5000;
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
if (!pdev)
return;
base = pci_ioremap_bar(pdev, 0);
if (!base)
goto put_pdev;
while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
cpu_relax();
udelay(1);
timeout--;
if (!timeout) {
dev_warn(dev, "RNM is not producing entropy\n");
break;
}
}
iounmap(base);
put_pdev:
pci_dev_put(pdev);
}
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
struct otx2_cpt_eng_grp_info *grp;
int i;
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
if (!grp->is_enabled)
continue;
if (eng_type == OTX2_CPT_SE_TYPES) {
if (eng_grp_has_eng_type(grp, eng_type) &&
!eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
eng_grp_num = i;
break;
}
} else {
if (eng_grp_has_eng_type(grp, eng_type)) {
eng_grp_num = i;
break;
}
}
}
return eng_grp_num;
}
int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_eng_grps *eng_grps)
{
struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
u64 reg_val;
int ret = 0;
mutex_lock(&eng_grps->lock);
/*
* We don't create engine groups if it was already
* made (when user enabled VFs for the first time)
*/
if (eng_grps->is_grps_created)
goto unlock;
ret = cpt_ucode_load_fw(pdev, &fw_info);
if (ret)
goto unlock;
/*
* Create engine group with SE engines for kernel
* crypto functionality (symmetric crypto)
*/
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for SE\n");
ret = -EINVAL;
goto release_fw;
}
engs[0].type = OTX2_CPT_SE_TYPES;
engs[0].count = eng_grps->avail.max_se_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) uc_info, 1);
if (ret)
goto release_fw;
/*
* Create engine group with SE+IE engines for IPSec.
* All SE engines will be shared with engine group 0.
*/
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
if (uc_info[1] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for IE");
ret = -EINVAL;
goto delete_eng_grp;
}
engs[0].type = OTX2_CPT_SE_TYPES;
engs[0].count = eng_grps->avail.max_se_cnt;
engs[1].type = OTX2_CPT_IE_TYPES;
engs[1].count = eng_grps->avail.max_ie_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
(void **) uc_info, 1);
if (ret)
goto delete_eng_grp;
/*
* Create engine group with AE engines for asymmetric
* crypto functionality.
*/
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for AE");
ret = -EINVAL;
goto delete_eng_grp;
}
engs[0].type = OTX2_CPT_AE_TYPES;
engs[0].count = eng_grps->avail.max_ae_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) uc_info, 1);
if (ret)
goto delete_eng_grp;
eng_grps->is_grps_created = true;
cpt_ucode_release_fw(&fw_info);
if (is_dev_otx2(pdev))
goto unlock;
/*
* Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
* CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
*/
rnm_to_cpt_errata_fixup(&pdev->dev);
/*
* Configure engine group mask to allow context prefetching
* for the groups and enable random number request, to enable
* CPT to request random numbers from RNM.
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
BLKADDR_CPT0);
/*
* Set interval to periodically flush dirty data for the next
* CTX cache entry. Set the interval count to maximum supported
* value.
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
/*
* Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when
* CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
* encounters a fault/poison, a rare case may result in
* unpredictable data being delivered to a CPT engine.
*/
otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
BLKADDR_CPT0);
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
reg_val | BIT_ULL(24), BLKADDR_CPT0);
mutex_unlock(&eng_grps->lock);
return 0;
delete_eng_grp:
delete_engine_grps(pdev, eng_grps);
release_fw:
cpt_ucode_release_fw(&fw_info);
unlock:
mutex_unlock(&eng_grps->lock);
return ret;
}
static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
int blkaddr)
{
int timeout = 10, ret;
int i, busy;
u64 reg;
/* Disengage the cores from groups */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), 0x0,
blkaddr);
if (ret)
return ret;
cptpf->eng_grps.eng_ref_cnt[i] = 0;
}
ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
if (ret)
return ret;
/* Wait for cores to become idle */
do {
busy = 0;
usleep_range(10000, 20000);
if (timeout-- < 0)
return -EBUSY;
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_STS(i), &reg,
blkaddr);
if (ret)
return ret;
if (reg & 0x1) {
busy = 1;
break;
}
}
} while (busy);
/* Disable the cores */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x0,
blkaddr);
if (ret)
return ret;
}
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
int total_cores, ret;
total_cores = cptpf->eng_grps.avail.max_se_cnt +
cptpf->eng_grps.avail.max_ie_cnt +
cptpf->eng_grps.avail.max_ae_cnt;
if (cptpf->has_cpt1) {
ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
struct otx2_cpt_eng_grp_info *grp;
int i, j;
mutex_lock(&eng_grps->lock);
delete_engine_grps(pdev, eng_grps);
/* Release memory */
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
kfree(grp->engs[j].bmap);
grp->engs[j].bmap = NULL;
}
}
mutex_unlock(&eng_grps->lock);
}
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
struct otx2_cpt_eng_grp_info *grp;
int i, j, ret;
mutex_init(&eng_grps->lock);
eng_grps->obj = pci_get_drvdata(pdev);
eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
eng_grps->engs_num = eng_grps->avail.max_se_cnt +
eng_grps->avail.max_ie_cnt +
eng_grps->avail.max_ae_cnt;
if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
dev_err(&pdev->dev,
"Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
ret = -EINVAL;
goto cleanup_eng_grps;
}
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
grp->g = eng_grps;
grp->idx = i;
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
grp->engs[j].bmap =
kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
sizeof(long), GFP_KERNEL);
if (!grp->engs[j].bmap) {
ret = -ENOMEM;
goto cleanup_eng_grps;
}
}
}
return 0;
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
return ret;
}
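/*
 * Create small, temporary AE/SE/IE engine groups (two engines each) used
 * only for discovering engine capabilities via the LOAD_FVC operation.
 */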
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct fw_info_t fw_info;
int ret;
mutex_lock(&eng_grps->lock);
ret = cpt_ucode_load_fw(pdev, &fw_info);
if (ret) {
mutex_unlock(&eng_grps->lock);
return ret;
}
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for AE\n");
ret = -EINVAL;
goto release_fw;
}
engs[0].type = OTX2_CPT_AE_TYPES;
engs[0].count = 2;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) uc_info, 0);
if (ret)
goto release_fw;
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for SE\n");
ret = -EINVAL;
goto delete_eng_grp;
}
engs[0].type = OTX2_CPT_SE_TYPES;
engs[0].count = 2;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) uc_info, 0);
if (ret)
goto delete_eng_grp;
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for IE\n");
ret = -EINVAL;
goto delete_eng_grp;
}
engs[0].type = OTX2_CPT_IE_TYPES;
engs[0].count = 2;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) uc_info, 0);
if (ret)
goto delete_eng_grp;
cpt_ucode_release_fw(&fw_info);
mutex_unlock(&eng_grps->lock);
return 0;
delete_eng_grp:
delete_engine_grps(pdev, eng_grps);
release_fw:
cpt_ucode_release_fw(&fw_info);
mutex_unlock(&eng_grps->lock);
return ret;
}
/*
* Get CPT HW capabilities using LOAD_FVC operation.
*/
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct otx2_cpt_iq_command iq_cmd;
union otx2_cpt_opcode opcode;
union otx2_cpt_res_s *result;
union otx2_cpt_inst_s inst;
dma_addr_t rptr_baddr;
struct pci_dev *pdev;
u32 len, compl_rlen;
int ret, etype;
void *rptr;
/*
* We don't get capabilities if it was already done
* (when user enabled VFs for the first time)
*/
if (cptpf->is_eng_caps_discovered)
return 0;
pdev = cptpf->pdev;
/*
* Create engine groups for each type to submit LOAD_FVC op and
* get engine's capabilities.
*/
ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
if (ret)
goto delete_grps;
otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
&cptpf->afpf_mbox, BLKADDR_CPT0);
ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
goto delete_grps;
compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
len = compl_rlen + LOADFVC_RLEN;
result = kzalloc(len, GFP_KERNEL);
if (!result) {
ret = -ENOMEM;
goto lf_cleanup;
}
rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
dev_err(&pdev->dev, "DMA mapping failed\n");
ret = -EFAULT;
goto free_result;
}
rptr = (u8 *)result + compl_rlen;
/* Fill in the command */
opcode.s.major = LOADFVC_MAJOR_OP;
opcode.s.minor = LOADFVC_MINOR_OP;
iq_cmd.cmd.u = 0;
iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
iq_cmd.dptr = 0;
iq_cmd.rptr = rptr_baddr + compl_rlen;
iq_cmd.cptr.u = 0;
for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
while (lfs->ops->cpt_get_compcode(result) ==
OTX2_CPT_COMPLETION_CODE_INIT)
cpu_relax();
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
}
dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
cptpf->is_eng_caps_discovered = true;
free_result:
kfree(result);
lf_cleanup:
otx2_cptlf_shutdown(lfs);
delete_grps:
delete_engine_grps(pdev, &cptpf->eng_grps);
return ret;
}
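/*
 * Devlink handler to create a custom engine group from a user supplied,
 * semicolon separated string of engine counts (e.g. "se:10") and up to
 * two microcode file names.
 */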
int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
struct device *dev = &cptpf->pdev->dev;
char *start, *val, *err_msg, *tmp;
int grp_idx = 0, ret = -EINVAL;
bool has_se, has_ie, has_ae;
struct fw_info_t fw_info;
int ucode_idx = 0;
if (!eng_grps->is_grps_created) {
dev_err(dev, "Not allowed before creating the default groups\n");
return -EINVAL;
}
err_msg = "Invalid engine group format";
strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
start = tmp_buf;
has_se = has_ie = has_ae = false;
for (;;) {
val = strsep(&start, ";");
if (!val)
break;
val = strim(val);
if (!*val)
continue;
if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
if (has_se || ucode_idx)
goto err_print;
tmp = strsep(&val, ":");
if (!tmp)
goto err_print;
tmp = strim(tmp);
if (!val)
goto err_print;
if (strlen(tmp) != 2)
goto err_print;
if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
goto err_print;
engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
has_se = true;
} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
if (has_ae || ucode_idx)
goto err_print;
tmp = strsep(&val, ":");
if (!tmp)
goto err_print;
tmp = strim(tmp);
if (!val)
goto err_print;
if (strlen(tmp) != 2)
goto err_print;
if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
goto err_print;
engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
has_ae = true;
} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
if (has_ie || ucode_idx)
goto err_print;
tmp = strsep(&val, ":");
if (!tmp)
goto err_print;
tmp = strim(tmp);
if (!val)
goto err_print;
if (strlen(tmp) != 2)
goto err_print;
if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
goto err_print;
engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
has_ie = true;
} else {
if (ucode_idx > 1)
goto err_print;
if (!strlen(val))
goto err_print;
if (strnstr(val, " ", strlen(val)))
goto err_print;
ucode_filename[ucode_idx++] = val;
}
}
/* Validate input parameters */
if (!(grp_idx && ucode_idx))
goto err_print;
if (ucode_idx > 1 && grp_idx < 2)
goto err_print;
if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
err_msg = "Error max 2 engine types can be attached";
goto err_print;
}
if (grp_idx > 1) {
if ((engs[0].type + engs[1].type) !=
(OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
err_msg = "Only combination of SE+IE engines is allowed";
goto err_print;
}
/* Keep SE engines at zero index */
if (engs[1].type == OTX2_CPT_SE_TYPES)
swap(engs[0], engs[1]);
}
mutex_lock(&eng_grps->lock);
if (cptpf->enabled_vfs) {
dev_err(dev, "Disable VFs before modifying engine groups\n");
ret = -EACCES;
goto err_unlock;
}
INIT_LIST_HEAD(&fw_info.ucodes);
ret = load_fw(dev, &fw_info, ucode_filename[0]);
if (ret) {
dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
goto err_unlock;
}
if (ucode_idx > 1) {
ret = load_fw(dev, &fw_info, ucode_filename[1]);
if (ret) {
dev_err(dev, "Unable to load firmware %s\n",
ucode_filename[1]);
goto release_fw;
}
}
uc_info[0] = get_ucode(&fw_info, engs[0].type);
if (uc_info[0] == NULL) {
dev_err(dev, "Unable to find firmware for %s\n",
get_eng_type_str(engs[0].type));
ret = -EINVAL;
goto release_fw;
}
if (ucode_idx > 1) {
uc_info[1] = get_ucode(&fw_info, engs[1].type);
if (uc_info[1] == NULL) {
dev_err(dev, "Unable to find firmware for %s\n",
get_eng_type_str(engs[1].type));
ret = -EINVAL;
goto release_fw;
}
}
ret = create_engine_group(dev, eng_grps, engs, grp_idx,
(void **)uc_info, 1);
release_fw:
cpt_ucode_release_fw(&fw_info);
err_unlock:
mutex_unlock(&eng_grps->lock);
return ret;
err_print:
dev_err(dev, "%s\n", err_msg);
return ret;
}
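/*
 * Delete a custom engine group. The expected devlink string format is
 * "egrp:<N>" (for example "egrp:0"), with N below OTX2_CPT_MAX_ENGINE_GROUPS
 * and referring to a currently configured group.
 */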
int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
struct device *dev = &cptpf->pdev->dev;
char *tmp, *err_msg;
int egrp;
int ret;
err_msg = "Invalid input string format(ex: egrp:0)";
if (strncasecmp(ctx->val.vstr, "egrp", 4))
goto err_print;
tmp = ctx->val.vstr;
strsep(&tmp, ":");
if (!tmp)
goto err_print;
if (kstrtoint(tmp, 10, &egrp))
goto err_print;
if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Invalid engine group %d", egrp);
return -EINVAL;
}
if (!eng_grps->grp[egrp].is_enabled) {
dev_err(dev, "Error engine_group%d is not configured", egrp);
return -EINVAL;
}
mutex_lock(&eng_grps->lock);
ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
mutex_unlock(&eng_grps->lock);
return ret;
err_print:
dev_err(dev, "%s\n", err_msg);
return -EINVAL;
}
static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
int size, int idx)
{
struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
struct otx2_cpt_engs_rsvd *engs;
int len, i;
buf[0] = '\0';
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
if (idx != -1 && idx != i)
continue;
if (eng_grp->mirror.is_ena)
mirrored_engs = find_engines_by_type(
&eng_grp->g->grp[eng_grp->mirror.idx],
engs->type);
if (i > 0 && idx == -1) {
len = strlen(buf);
scnprintf(buf + len, size - len, ", ");
}
len = strlen(buf);
scnprintf(buf + len, size - len, "%d %s ",
mirrored_engs ? engs->count + mirrored_engs->count :
engs->count,
get_eng_type_str(engs->type));
if (mirrored_engs) {
len = strlen(buf);
scnprintf(buf + len, size - len,
"(%d shared with engine_group%d) ",
engs->count <= 0 ?
engs->count + mirrored_engs->count :
mirrored_engs->count,
eng_grp->mirror.idx);
}
}
}
void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
{
struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
struct otx2_cpt_eng_grp_info *mirrored_grp;
char engs_info[2 * OTX2_CPT_NAME_LENGTH];
struct otx2_cpt_eng_grp_info *grp;
struct otx2_cpt_engs_rsvd *engs;
int i, j;
pr_debug("Engine groups global info");
pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
pr_debug("free SE %d", eng_grps->avail.se_cnt);
pr_debug("free IE %d", eng_grps->avail.ie_cnt);
pr_debug("free AE %d", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
pr_debug("engine_group%d, state %s", i,
grp->is_enabled ? "enabled" : "disabled");
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
pr_debug("Ucode0 filename %s, version %s",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
grp->mirror.is_ena ?
mirrored_grp->ucode[0].ver_str :
grp->ucode[0].ver_str);
if (is_2nd_ucode_used(grp))
pr_debug("Ucode1 filename %s, version %s",
grp->ucode[1].filename,
grp->ucode[1].ver_str);
}
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
engs = &grp->engs[j];
if (engs->type) {
u32 mask[5] = { };
get_engs_info(grp, engs_info,
2 * OTX2_CPT_NAME_LENGTH, j);
pr_debug("Slot%d: %s", j, engs_info);
bitmap_to_arr32(mask, engs->bmap,
eng_grps->engs_num);
if (is_dev_otx2(cptpf->pdev))
pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
mask[3], mask[2], mask[1],
mask[0]);
else
pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
mask[4], mask[3], mask[2], mask[1],
mask[0]);
}
}
}
}
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
#include "rvu_reg.h"
#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32
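/*
 * Default done-interrupt coalescing values written into the
 * CPT_LF_DONE_WAIT time_wait/num_wait fields by cptlf_hw_init() below.
 */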
static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
int time_wait)
{
union otx2_cptx_lf_done_wait done_wait;
done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
lf->slot, OTX2_CPT_LF_DONE_WAIT);
done_wait.s.time_wait = time_wait;
otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}
static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
union otx2_cptx_lf_done_wait done_wait;
done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
lf->slot, OTX2_CPT_LF_DONE_WAIT);
done_wait.s.num_wait = num_wait;
otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}
static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
int time_wait)
{
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++)
cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}
static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++)
cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}
static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
struct otx2_cptlfs_info *lfs = lf->lfs;
union otx2_cptx_af_lf_ctrl lf_ctrl;
int ret;
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
&lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
lf_ctrl.s.pri = pri ? 1 : 0;
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
lf_ctrl.u, lfs->blkaddr);
return ret;
}
static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
int eng_grps_mask)
{
struct otx2_cptlfs_info *lfs = lf->lfs;
union otx2_cptx_af_lf_ctrl lf_ctrl;
int ret;
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
&lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
lf_ctrl.s.grp = eng_grps_mask;
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
lf_ctrl.u, lfs->blkaddr);
return ret;
}
static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
int eng_grp_mask, int pri)
{
int slot, ret = 0;
for (slot = 0; slot < lfs->lfs_num; slot++) {
ret = cptlf_set_pri(&lfs->lf[slot], pri);
if (ret)
return ret;
ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
if (ret)
return ret;
}
return ret;
}
static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
/* Disable instruction queues */
otx2_cptlf_disable_iqueues(lfs);
/* Set instruction queues base addresses */
otx2_cptlf_set_iqueues_base_addr(lfs);
/* Set instruction queues sizes */
otx2_cptlf_set_iqueues_size(lfs);
/* Set done interrupts time wait */
cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);
/* Set done interrupts num wait */
cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);
/* Enable instruction queues */
otx2_cptlf_enable_iqueues(lfs);
}
static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
/* Disable instruction queues */
otx2_cptlf_disable_iqueues(lfs);
}
static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
OTX2_CPT_LF_MISC_INT_ENA_W1C;
int slot;
irq_misc.s.fault = 0x1;
irq_misc.s.hwerr = 0x1;
irq_misc.s.irde = 0x1;
irq_misc.s.nqerr = 0x1;
irq_misc.s.nwrp = 0x1;
for (slot = 0; slot < lfs->lfs_num; slot++)
otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
irq_misc.u);
}
static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
{
int slot;
/* Enable done interrupts */
for (slot = 0; slot < lfs->lfs_num; slot++)
otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
/* Enable Misc interrupts */
cptlf_set_misc_intrs(lfs, true);
}
static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
{
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++)
otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
cptlf_set_misc_intrs(lfs, false);
}
static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_done irq_cnt;
irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE);
return irq_cnt.s.done;
}
static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
struct otx2_cptlf_info *lf = arg;
struct device *dev;
dev = &lf->lfs->pdev->dev;
irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
lf->slot, OTX2_CPT_LF_MISC_INT);
irq_misc_ack.u = 0x0;
if (irq_misc.s.fault) {
dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
lf->slot);
irq_misc_ack.s.fault = 0x1;
} else if (irq_misc.s.hwerr) {
dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.",
lf->slot);
irq_misc_ack.s.hwerr = 0x1;
} else if (irq_misc.s.nwrp) {
dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
lf->slot);
irq_misc_ack.s.nwrp = 0x1;
} else if (irq_misc.s.irde) {
dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
irq_misc_ack.s.irde = 0x1;
} else if (irq_misc.s.nqerr) {
dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
irq_misc_ack.s.nqerr = 0x1;
} else {
dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
return IRQ_NONE;
}
/* Acknowledge interrupts */
otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
return IRQ_HANDLED;
}
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
union otx2_cptx_lf_done_wait done_wait;
struct otx2_cptlf_info *lf = arg;
int irq_cnt;
/* Read the number of completed requests */
irq_cnt = cptlf_read_done_cnt(lf);
if (irq_cnt) {
done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
lf->slot, OTX2_CPT_LF_DONE_WAIT);
/* Acknowledge the number of completed requests */
otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_ACK, irq_cnt);
otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
if (unlikely(!lf->wqe)) {
dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
lf->slot);
return IRQ_NONE;
}
/* Schedule processing of completed requests */
tasklet_hi_schedule(&lf->wqe->work);
}
return IRQ_HANDLED;
}
void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
{
int i, offs, vector;
for (i = 0; i < lfs->lfs_num; i++) {
for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
if (!lfs->lf[i].is_irq_reg[offs])
continue;
vector = pci_irq_vector(lfs->pdev,
lfs->lf[i].msix_offset + offs);
free_irq(vector, &lfs->lf[i]);
lfs->lf[i].is_irq_reg[offs] = false;
}
}
cptlf_disable_intrs(lfs);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
CRYPTO_DEV_OCTEONTX2_CPT);
static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
					int lf_num, int irq_offset,
					irq_handler_t handler)
{
int ret, vector;
vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
irq_offset);
ret = request_irq(vector, handler, 0,
lfs->lf[lf_num].irq_name[irq_offset],
&lfs->lf[lf_num]);
if (ret)
return ret;
lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
return ret;
}
int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
{
int irq_offs, ret, i;
for (i = 0; i < lfs->lfs_num; i++) {
irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
						   cptlf_misc_intr_handler);
if (ret)
goto free_irq;
irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
i);
		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
						   cptlf_done_intr_handler);
if (ret)
goto free_irq;
}
cptlf_enable_intrs(lfs);
return 0;
free_irq:
otx2_cptlf_unregister_interrupts(lfs);
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
int slot, offs;
for (slot = 0; slot < lfs->lfs_num; slot++) {
for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
lfs->lf[slot].msix_offset +
offs), NULL);
free_cpumask_var(lfs->lf[slot].affinity_mask);
}
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
struct otx2_cptlf_info *lf = lfs->lf;
int slot, offs, ret;
for (slot = 0; slot < lfs->lfs_num; slot++) {
if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
dev_err(&lfs->pdev->dev,
"cpumask allocation failed for LF %d", slot);
ret = -ENOMEM;
goto free_affinity_mask;
}
cpumask_set_cpu(cpumask_local_spread(slot,
dev_to_node(&lfs->pdev->dev)),
lf[slot].affinity_mask);
for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
lf[slot].msix_offset + offs),
lf[slot].affinity_mask);
if (ret)
goto free_affinity_mask;
}
}
return 0;
free_affinity_mask:
otx2_cptlf_free_irqs_affinity(lfs);
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
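/*
 * Bring up 'lfs_num' CPT LFs: attach the LFs through the AF mailbox,
 * allocate their instruction queues, program the hardware (queue base and
 * size, done-interrupt coalescing) and finally set the engine group mask
 * and queue priority for every LF.
 */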
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
int lfs_num)
{
int slot, ret;
if (!lfs->pdev || !lfs->reg_base)
return -EINVAL;
lfs->lfs_num = lfs_num;
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
if (lfs->lmt_base)
lfs->lf[slot].lmtline = lfs->lmt_base +
(slot * LMTLINE_SIZE);
else
lfs->lf[slot].lmtline = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
lfs->lf[slot].ioreg = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
OTX2_CPT_LF_NQX(0));
}
/* Send request to attach LFs */
ret = otx2_cpt_attach_rscrs_msg(lfs);
if (ret)
goto clear_lfs_num;
ret = otx2_cpt_alloc_instruction_queues(lfs);
if (ret) {
dev_err(&lfs->pdev->dev,
"Allocating instruction queues failed\n");
goto detach_rsrcs;
}
cptlf_hw_init(lfs);
/*
* Allow each LF to execute requests destined to any of 8 engine
* groups and set queue priority of each LF to high
*/
ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
if (ret)
goto free_iq;
return 0;
free_iq:
otx2_cpt_free_instruction_queues(lfs);
cptlf_hw_cleanup(lfs);
detach_rsrcs:
otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
lfs->lfs_num = 0;
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
lfs->lfs_num = 0;
/* Cleanup LFs hardware side */
cptlf_hw_cleanup(lfs);
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
MODULE_LICENSE("GPL");
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptlf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include <rvu_reg.h>
int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *otx2_mbox;
cptvf->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
if (!cptvf->bbuf_base)
return -ENOMEM;
	/*
	 * Overwrite the mbox mbase to point to the bounce buffer, so that
	 * PF/VF mbox messages are prepared in the bounce buffer instead of
	 * directly in the hw mbox memory.
	 */
otx2_mbox = &cptvf->pfvf_mbox;
mdev = &otx2_mbox->dev[0];
mdev->mbase = cptvf->bbuf_base;
return 0;
}
static void otx2_cpt_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *hdr;
u64 msg_size;
if (mdev->mbase == hw_mbase)
return;
hdr = hw_mbase + mbox->rx_start;
msg_size = hdr->msg_size;
if (msg_size > mbox->rx_size - msgs_offset)
msg_size = mbox->rx_size - msgs_offset;
/* Copy mbox messages from mbox memory to bounce buffer */
memcpy(mdev->mbase + mbox->rx_start,
hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptvf_dev *cptvf = arg;
u64 intr;
/* Read the interrupt bits */
intr = otx2_cpt_read64(cptvf->reg_base, BLKADDR_RVUM, 0,
OTX2_RVU_VF_INT);
if (intr & 0x1ULL) {
/* Schedule work queue function to process the MBOX request */
queue_work(cptvf->pfvf_mbox_wq, &cptvf->pfvf_mbox_work);
/* Clear and ack the interrupt */
otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
OTX2_RVU_VF_INT, 0x1ULL);
}
return IRQ_HANDLED;
}
static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct mbox_msghdr *msg)
{
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
struct otx2_cpt_kvf_limits_rsp *rsp_limits;
struct otx2_cpt_egrp_num_rsp *rsp_grp;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
int i;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(&cptvf->pdev->dev,
"MBOX msg with unknown ID %d\n", msg->id);
return;
}
if (msg->sig != OTX2_MBOX_RSP_SIG) {
dev_err(&cptvf->pdev->dev,
"MBOX msg with wrong signature %x, ID %d\n",
msg->sig, msg->id);
return;
}
switch (msg->id) {
case MBOX_MSG_READY:
cptvf->vf_id = ((msg->pcifunc >> RVU_PFVF_FUNC_SHIFT)
& RVU_PFVF_FUNC_MASK) - 1;
break;
case MBOX_MSG_ATTACH_RESOURCES:
/* Check if resources were successfully attached */
if (!msg->rc)
lfs->are_lfs_attached = 1;
break;
case MBOX_MSG_DETACH_RESOURCES:
/* Check if resources were successfully detached */
if (!msg->rc)
lfs->are_lfs_attached = 0;
break;
case MBOX_MSG_MSIX_OFFSET:
rsp_msix = (struct msix_offset_rsp *) msg;
for (i = 0; i < rsp_msix->cptlfs; i++)
lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
break;
case MBOX_MSG_CPT_RD_WR_REGISTER:
rsp_reg = (struct cpt_rd_wr_reg_msg *) msg;
if (msg->rc) {
dev_err(&cptvf->pdev->dev,
"Reg %llx rd/wr(%d) failed %d\n",
rsp_reg->reg_offset, rsp_reg->is_write,
msg->rc);
return;
}
if (!rsp_reg->is_write)
*rsp_reg->ret_val = rsp_reg->val;
break;
case MBOX_MSG_GET_ENG_GRP_NUM:
rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
break;
case MBOX_MSG_GET_KVF_LIMITS:
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
msg->id);
break;
}
}
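/*
 * Work handler scheduled from the PF->VF mailbox interrupt: sync the bounce
 * buffer, walk every response message found in it and reset the mailbox
 * once all messages have been processed.
 */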
void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
{
struct otx2_cptvf_dev *cptvf;
struct otx2_mbox *pfvf_mbox;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
int offset, i;
/* sync with mbox memory region */
smp_rmb();
cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
pfvf_mbox = &cptvf->pfvf_mbox;
otx2_cpt_sync_mbox_bbuf(pfvf_mbox, 0);
mdev = &pfvf_mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
if (rsp_hdr->num_msgs == 0)
return;
offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
for (i = 0; i < rsp_hdr->num_msgs; i++) {
msg = (struct mbox_msghdr *)(mdev->mbase + pfvf_mbox->rx_start +
offset);
process_pfvf_mbox_mbox_msg(cptvf, msg);
offset = msg->next_msgoff;
mdev->msgs_acked++;
}
otx2_mbox_reset(pfvf_mbox, 0);
}
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
{
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct otx2_cpt_egrp_num_msg *req;
req = (struct otx2_cpt_egrp_num_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct otx2_cpt_egrp_num_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
{
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct mbox_msghdr *req;
req = (struct mbox_msghdr *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct otx2_cpt_kvf_limits_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
| linux-master | drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
* Author: Arnaud Ebalard <arno@natisbad.org>
*
* This work is based on an initial version written by
* Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
*/
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include "cesa.h"
struct mv_cesa_ahash_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
};
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
unsigned int len = req->nbytes + creq->cache_ptr;
if (!creq->last_req)
len &= ~CESA_HASH_BLOCK_SIZE_MSK;
mv_cesa_req_dma_iter_init(&iter->base, len);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
iter->src.op_offset = creq->cache_ptr;
}
static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
iter->src.op_offset = 0;
return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
&req->cache_dma);
if (!req->cache)
return -ENOMEM;
return 0;
}
static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
if (!req->cache)
return;
dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
req->cache_dma);
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
gfp_t flags)
{
if (req->padding)
return 0;
req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
&req->padding_dma);
if (!req->padding)
return -ENOMEM;
return 0;
}
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
if (!req->padding)
return;
dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
req->padding_dma);
req->padding = NULL;
}
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
mv_cesa_ahash_dma_free_cache(&creq->req.dma);
mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_last_cleanup(req);
}
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
unsigned int index, padlen;
index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
padlen = (index < 56) ? (56 - index) : (64 + 56 - index);
return padlen;
}
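/*
 * Append the standard MD5/SHA padding: a 0x80 byte, zeroes up to 56 mod 64,
 * then the message length in bits (little endian for MD5, big endian for
 * SHA). For example, with 3 bytes already in the final block, padlen is 53,
 * so 1 + 52 + 8 = 61 bytes are appended to complete a 64-byte block.
 */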
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
unsigned int padlen;
buf[0] = 0x80;
/* Pad out to 56 mod 64 */
padlen = mv_cesa_ahash_pad_len(creq);
memset(buf + 1, 0, padlen - 1);
if (creq->algo_le) {
__le64 bits = cpu_to_le64(creq->len << 3);
memcpy(buf + padlen, &bits, sizeof(bits));
} else {
__be64 bits = cpu_to_be64(creq->len << 3);
memcpy(buf + padlen, &bits, sizeof(bits));
}
return padlen + 8;
}
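/*
 * CPU-driven ("standard") step: copy the operation descriptor, any cached
 * data and up to CESA_SA_SRAM_PAYLOAD_SIZE bytes of new source data into
 * the engine SRAM, fix up the fragment mode and lengths, then start the
 * engine.
 */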
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
struct mv_cesa_engine *engine = creq->base.engine;
struct mv_cesa_op_ctx *op;
unsigned int new_cache_ptr = 0;
u32 frag_mode;
size_t len;
unsigned int digsize;
int i;
mv_cesa_adjust_op(engine, &creq->op_tmpl);
if (engine->pool)
memcpy(engine->sram_pool, &creq->op_tmpl,
sizeof(creq->op_tmpl));
else
memcpy_toio(engine->sram, &creq->op_tmpl,
sizeof(creq->op_tmpl));
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
for (i = 0; i < digsize / 4; i++)
writel_relaxed(creq->state[i],
engine->regs + CESA_IVDIG(i));
}
if (creq->cache_ptr) {
if (engine->pool)
memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
creq->cache, creq->cache_ptr);
else
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
creq->cache, creq->cache_ptr);
}
len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
if (!creq->last_req) {
new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
len &= ~CESA_HASH_BLOCK_SIZE_MSK;
}
if (len - creq->cache_ptr)
sreq->offset += mv_cesa_sg_copy_to_sram(
engine, req->src, creq->src_nents,
CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
len - creq->cache_ptr, sreq->offset);
op = &creq->op_tmpl;
frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;
if (creq->last_req && sreq->offset == req->nbytes &&
creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
}
if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
if (len &&
creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
mv_cesa_set_mac_op_total_len(op, creq->len);
} else {
int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;
if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
len &= CESA_HASH_BLOCK_SIZE_MSK;
new_cache_ptr = 64 - trailerlen;
if (engine->pool)
memcpy(creq->cache,
engine->sram_pool +
CESA_SA_DATA_SRAM_OFFSET + len,
new_cache_ptr);
else
memcpy_fromio(creq->cache,
engine->sram +
CESA_SA_DATA_SRAM_OFFSET +
len,
new_cache_ptr);
} else {
i = mv_cesa_ahash_pad_req(creq, creq->cache);
len += i;
if (engine->pool)
memcpy(engine->sram_pool + len +
CESA_SA_DATA_SRAM_OFFSET,
creq->cache, i);
else
memcpy_toio(engine->sram + len +
CESA_SA_DATA_SRAM_OFFSET,
creq->cache, i);
}
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
else
frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
}
}
mv_cesa_set_mac_op_frag_len(op, len);
mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
/* FIXME: only update enc_len field */
if (engine->pool)
memcpy(engine->sram_pool, op, sizeof(*op));
else
memcpy_toio(engine->sram, op, sizeof(*op));
if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
creq->cache_ptr = new_cache_ptr;
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
WARN_ON(readl(engine->regs + CESA_SA_CMD) &
CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
if (sreq->offset < (req->nbytes - creq->cache_ptr))
return -EINPROGRESS;
return 0;
}
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_req *basereq = &creq->base;
mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
sreq->offset = 0;
}
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_req *base = &creq->base;
/* We must explicitly set the digest state. */
if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
struct mv_cesa_engine *engine = base->engine;
int i;
/* Set the hash state in the IVDIG regs. */
for (i = 0; i < ARRAY_SIZE(creq->state); i++)
writel_relaxed(creq->state[i], engine->regs +
CESA_IVDIG(i));
}
mv_cesa_dma_step(base);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_step(ahashreq);
else
mv_cesa_ahash_std_step(ahashreq);
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
return mv_cesa_dma_process(&creq->base, status);
return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int digsize;
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
CESA_TDMA_RESULT) {
__le32 *data = NULL;
/*
* Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
for (i = 0; i < digsize / 4; i++)
creq->state[i] = le32_to_cpu(data[i]);
memcpy(ahashreq->result, data, digsize);
} else {
for (i = 0; i < digsize / 4; i++)
creq->state[i] = readl_relaxed(engine->regs +
CESA_IVDIG(i));
if (creq->last_req) {
			/*
			 * The hardware's MD5 digest is in little endian
			 * format, while SHA digests are in big endian format.
			 */
if (creq->algo_le) {
__le32 *result = (void *)ahashreq->result;
for (i = 0; i < digsize / 4; i++)
result[i] = cpu_to_le32(creq->state[i]);
} else {
__be32 *result = (void *)ahashreq->result;
for (i = 0; i < digsize / 4; i++)
result[i] = cpu_to_be32(creq->state[i]);
}
}
}
atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
struct mv_cesa_engine *engine)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_prepare(ahashreq);
else
mv_cesa_ahash_std_prepare(ahashreq);
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (creq->last_req)
mv_cesa_ahash_last_cleanup(ahashreq);
mv_cesa_ahash_cleanup(ahashreq);
if (creq->cache_ptr)
sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
creq->cache,
creq->cache_ptr,
ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
.step = mv_cesa_ahash_step,
.process = mv_cesa_ahash_process,
.cleanup = mv_cesa_ahash_req_cleanup,
.complete = mv_cesa_ahash_complete,
};
static void mv_cesa_ahash_init(struct ahash_request *req,
struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
memset(creq, 0, sizeof(*creq));
mv_cesa_update_op_cfg(tmpl,
CESA_SA_DESC_CFG_OP_MAC_ONLY |
CESA_SA_DESC_CFG_FIRST_FRAG,
CESA_SA_DESC_CFG_OP_MSK |
CESA_SA_DESC_CFG_FRAG_MSK);
mv_cesa_set_mac_op_total_len(tmpl, 0);
mv_cesa_set_mac_op_frag_len(tmpl, 0);
creq->op_tmpl = *tmpl;
creq->len = 0;
creq->algo_le = algo_le;
}
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->base.ops = &mv_cesa_ahash_req_ops;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct mv_cesa_ahash_req));
return 0;
}
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
bool cached = false;
if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
!creq->last_req) {
cached = true;
if (!req->nbytes)
return cached;
sg_pcopy_to_buffer(req->src, creq->src_nents,
creq->cache + creq->cache_ptr,
req->nbytes, 0);
creq->cache_ptr += req->nbytes;
}
return cached;
}
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
gfp_t flags)
{
struct mv_cesa_op_ctx *op;
int ret;
op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
if (IS_ERR(op))
return op;
/* Set the operation block fragment length. */
mv_cesa_set_mac_op_frag_len(op, frag_len);
/* Append dummy desc to launch operation */
ret = mv_cesa_dma_add_dummy_launch(chain, flags);
if (ret)
return ERR_PTR(ret);
if (mv_cesa_mac_op_is_first_frag(tmpl))
mv_cesa_update_op_cfg(tmpl,
CESA_SA_DESC_CFG_MID_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
return op;
}
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_ahash_req *creq,
gfp_t flags)
{
struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
int ret;
if (!creq->cache_ptr)
return 0;
ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
if (ret)
return ret;
memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
return mv_cesa_dma_add_data_transfer(chain,
CESA_SA_DATA_SRAM_OFFSET,
ahashdreq->cache_dma,
creq->cache_ptr,
CESA_TDMA_DST_IN_SRAM,
flags);
}
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_ahash_dma_iter *dma_iter,
struct mv_cesa_ahash_req *creq,
unsigned int frag_len, gfp_t flags)
{
struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
unsigned int len, trailerlen, padoff = 0;
struct mv_cesa_op_ctx *op;
int ret;
/*
* If the transfer is smaller than our maximum length, and we have
* some data outstanding, we can ask the engine to finish the hash.
*/
if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
flags);
if (IS_ERR(op))
return op;
mv_cesa_set_mac_op_total_len(op, creq->len);
mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
CESA_SA_DESC_CFG_NOT_FRAG :
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
ret = mv_cesa_dma_add_result_op(chain,
CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
return ERR_PTR(-ENOMEM);
return op;
}
/*
* The request is longer than the engine can handle, or we have
* no data outstanding. Manually generate the padding, adding it
* as a "mid" fragment.
*/
ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
if (ret)
return ERR_PTR(ret);
trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
if (len) {
ret = mv_cesa_dma_add_data_transfer(chain,
CESA_SA_DATA_SRAM_OFFSET +
frag_len,
ahashdreq->padding_dma,
len, CESA_TDMA_DST_IN_SRAM,
flags);
if (ret)
return ERR_PTR(ret);
op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
flags);
if (IS_ERR(op))
return op;
if (len == trailerlen)
return op;
padoff += len;
}
ret = mv_cesa_dma_add_data_transfer(chain,
CESA_SA_DATA_SRAM_OFFSET,
ahashdreq->padding_dma +
padoff,
trailerlen - padoff,
CESA_TDMA_DST_IN_SRAM,
flags);
if (ret)
return ERR_PTR(ret);
return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
flags);
}
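/*
 * Build the TDMA chain for this request: DMA-map the source scatterlist,
 * add the cached left-over data, split the new data into SRAM-sized
 * fragments with an operation descriptor between them, then append either
 * the final-request handling (mv_cesa_ahash_dma_last_req()) or an
 * intermediate fragment op.
 */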
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
bool set_state = false;
int ret;
u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
set_state = true;
if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
if (!ret) {
ret = -ENOMEM;
goto err;
}
}
mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ahash_req_iter_init(&iter, req);
/*
* Add the cache (left-over data from a previous block) first.
* This will never overflow the SRAM size.
*/
ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
if (ret)
goto err_free_tdma;
if (iter.src.sg) {
/*
* Add all the new data, inserting an operation block and
* launch command between each full SRAM block-worth of
* data. We intentionally do not add the final op block.
*/
while (true) {
ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
&iter.base,
&iter.src, flags);
if (ret)
goto err_free_tdma;
frag_len = iter.base.op_len;
if (!mv_cesa_ahash_req_iter_next_op(&iter))
break;
op = mv_cesa_dma_add_frag(&basereq->chain,
&creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
}
}
} else {
/* Account for the data that was in the cache. */
frag_len = iter.base.op_len;
}
/*
* At this point, frag_len indicates whether we have any data
* outstanding which needs an operation. Queue up the final
* operation, which depends whether this is the final request.
*/
if (creq->last_req)
op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
frag_len, flags);
else if (frag_len)
op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
}
/*
* If results are copied via DMA, this means that this
* request can be directly processed by the engine,
* without partial updates. So we can chain it at the
* DMA level with other requests.
*/
type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
}
if (!creq->last_req)
creq->cache_ptr = req->nbytes + creq->cache_ptr -
iter.base.len;
else
creq->cache_ptr = 0;
basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
if (type != CESA_TDMA_RESULT)
basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
if (set_state) {
/*
* Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
* let the step logic know that the IVDIG registers should be
* explicitly set before launching a TDMA chain.
*/
basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
}
return 0;
err_free_tdma:
mv_cesa_dma_cleanup(basereq);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
err:
mv_cesa_ahash_last_cleanup(req);
return ret;
}
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
*cached = mv_cesa_ahash_cache_req(req);
if (*cached)
return 0;
if (cesa_dev->caps->has_tdma)
return mv_cesa_ahash_dma_req_init(req);
else
return 0;
}
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_engine *engine;
bool cached = false;
int ret;
ret = mv_cesa_ahash_req_init(req, &cached);
if (ret)
return ret;
if (cached)
return 0;
engine = mv_cesa_select_engine(req->nbytes);
mv_cesa_ahash_prepare(&req->base, engine);
ret = mv_cesa_queue_req(&req->base, &creq->base);
if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
creq->len += req->nbytes;
return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_final(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
mv_cesa_set_mac_op_total_len(tmpl, creq->len);
creq->last_req = true;
req->nbytes = 0;
return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
creq->len += req->nbytes;
mv_cesa_set_mac_op_total_len(tmpl, creq->len);
creq->last_req = true;
return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
u64 *len, void *cache)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
unsigned int digsize = crypto_ahash_digestsize(ahash);
unsigned int blocksize;
blocksize = crypto_ahash_blocksize(ahash);
*len = creq->len;
memcpy(hash, creq->state, digsize);
memset(cache, 0, blocksize);
memcpy(cache, creq->cache, creq->cache_ptr);
return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
u64 len, const void *cache)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
unsigned int digsize = crypto_ahash_digestsize(ahash);
unsigned int blocksize;
unsigned int cache_ptr;
int ret;
ret = crypto_ahash_init(req);
if (ret)
return ret;
blocksize = crypto_ahash_blocksize(ahash);
if (len >= blocksize)
mv_cesa_update_op_cfg(&creq->op_tmpl,
CESA_SA_DESC_CFG_MID_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
creq->len = len;
memcpy(creq->state, hash, digsize);
creq->cache_ptr = 0;
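	/* do_div() leaves len / blocksize in len and returns len % blocksize */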
cache_ptr = do_div(len, blocksize);
if (!cache_ptr)
return 0;
memcpy(creq->cache, cache, cache_ptr);
creq->cache_ptr = cache_ptr;
return 0;
}
static int mv_cesa_md5_init(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
mv_cesa_ahash_init(req, &tmpl, true);
creq->state[0] = MD5_H0;
creq->state[1] = MD5_H1;
creq->state[2] = MD5_H2;
creq->state[3] = MD5_H3;
return 0;
}
static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
struct md5_state *out_state = out;
return mv_cesa_ahash_export(req, out_state->hash,
&out_state->byte_count, out_state->block);
}
static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
const struct md5_state *in_state = in;
return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
in_state->block);
}
static int mv_cesa_md5_digest(struct ahash_request *req)
{
int ret;
ret = mv_cesa_md5_init(req);
if (ret)
return ret;
return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_md5_alg = {
.init = mv_cesa_md5_init,
.update = mv_cesa_ahash_update,
.final = mv_cesa_ahash_final,
.finup = mv_cesa_ahash_finup,
.digest = mv_cesa_md5_digest,
.export = mv_cesa_md5_export,
.import = mv_cesa_md5_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "mv-md5",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
}
}
};
static int mv_cesa_sha1_init(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
mv_cesa_ahash_init(req, &tmpl, false);
creq->state[0] = SHA1_H0;
creq->state[1] = SHA1_H1;
creq->state[2] = SHA1_H2;
creq->state[3] = SHA1_H3;
creq->state[4] = SHA1_H4;
return 0;
}
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
struct sha1_state *out_state = out;
return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
out_state->buffer);
}
static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
const struct sha1_state *in_state = in;
return mv_cesa_ahash_import(req, in_state->state, in_state->count,
in_state->buffer);
}
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
int ret;
ret = mv_cesa_sha1_init(req);
if (ret)
return ret;
return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_sha1_alg = {
.init = mv_cesa_sha1_init,
.update = mv_cesa_ahash_update,
.final = mv_cesa_ahash_final,
.finup = mv_cesa_ahash_finup,
.digest = mv_cesa_sha1_digest,
.export = mv_cesa_sha1_export,
.import = mv_cesa_sha1_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "mv-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
}
}
};
static int mv_cesa_sha256_init(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
mv_cesa_ahash_init(req, &tmpl, false);
creq->state[0] = SHA256_H0;
creq->state[1] = SHA256_H1;
creq->state[2] = SHA256_H2;
creq->state[3] = SHA256_H3;
creq->state[4] = SHA256_H4;
creq->state[5] = SHA256_H5;
creq->state[6] = SHA256_H6;
creq->state[7] = SHA256_H7;
return 0;
}
static int mv_cesa_sha256_digest(struct ahash_request *req)
{
int ret;
ret = mv_cesa_sha256_init(req);
if (ret)
return ret;
return mv_cesa_ahash_finup(req);
}
static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
struct sha256_state *out_state = out;
return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
out_state->buf);
}
static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
const struct sha256_state *in_state = in;
return mv_cesa_ahash_import(req, in_state->state, in_state->count,
in_state->buf);
}
struct ahash_alg mv_sha256_alg = {
.init = mv_cesa_sha256_init,
.update = mv_cesa_ahash_update,
.final = mv_cesa_ahash_final,
.finup = mv_cesa_ahash_finup,
.digest = mv_cesa_sha256_digest,
.export = mv_cesa_sha256_export,
.import = mv_cesa_sha256_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "mv-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
}
}
};
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
void *state, unsigned int blocksize)
{
DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
sg_init_one(&sg, pad, blocksize);
ahash_request_set_crypt(req, &sg, pad, blocksize);
ret = crypto_ahash_init(req);
if (ret)
return ret;
ret = crypto_ahash_update(req);
ret = crypto_wait_req(ret, &result);
if (ret)
return ret;
ret = crypto_ahash_export(req, state);
if (ret)
return ret;
return 0;
}
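/*
 * Build the HMAC inner/outer pads: the key (hashed first if it is longer
 * than a block) is zero-padded to the block size and XORed with
 * HMAC_IPAD_VALUE and HMAC_OPAD_VALUE respectively, following the usual
 * HMAC construction.
 */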
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
const u8 *key, unsigned int keylen,
u8 *ipad, u8 *opad,
unsigned int blocksize)
{
DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
int i;
if (keylen <= blocksize) {
memcpy(ipad, key, keylen);
} else {
u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
if (!keydup)
return -ENOMEM;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &result);
sg_init_one(&sg, keydup, keylen);
ahash_request_set_crypt(req, &sg, ipad, keylen);
ret = crypto_ahash_digest(req);
ret = crypto_wait_req(ret, &result);
/* Set the memory region to 0 to avoid any leak. */
kfree_sensitive(keydup);
if (ret)
return ret;
keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
}
memset(ipad + keylen, 0, blocksize - keylen);
memcpy(opad, ipad, blocksize);
for (i = 0; i < blocksize; i++) {
ipad[i] ^= HMAC_IPAD_VALUE;
opad[i] ^= HMAC_OPAD_VALUE;
}
return 0;
}
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
const u8 *key, unsigned int keylen,
void *istate, void *ostate)
{
struct ahash_request *req;
struct crypto_ahash *tfm;
unsigned int blocksize;
u8 *ipad = NULL;
u8 *opad;
int ret;
tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto free_ahash;
}
crypto_ahash_clear_flags(tfm, ~0);
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
ipad = kcalloc(2, blocksize, GFP_KERNEL);
if (!ipad) {
ret = -ENOMEM;
goto free_req;
}
opad = ipad + blocksize;
ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
if (ret)
goto free_ipad;
ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
if (ret)
goto free_ipad;
ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);
free_ipad:
kfree(ipad);
free_req:
ahash_request_free(req);
free_ahash:
crypto_free_ahash(tfm);
return ret;
}
static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->base.ops = &mv_cesa_ahash_req_ops;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct mv_cesa_ahash_req));
return 0;
}
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
mv_cesa_ahash_init(req, &tmpl, true);
return 0;
}
static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct md5_state istate, ostate;
int ret, i;
ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
ctx->iv[i] = cpu_to_be32(istate.hash[i]);
for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);
return 0;
}
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
int ret;
ret = mv_cesa_ahmac_md5_init(req);
if (ret)
return ret;
return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_md5_alg = {
.init = mv_cesa_ahmac_md5_init,
.update = mv_cesa_ahash_update,
.final = mv_cesa_ahash_final,
.finup = mv_cesa_ahash_finup,
.digest = mv_cesa_ahmac_md5_digest,
.setkey = mv_cesa_ahmac_md5_setkey,
.export = mv_cesa_md5_export,
.import = mv_cesa_md5_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "mv-hmac-md5",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
}
}
};
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
mv_cesa_ahash_init(req, &tmpl, false);
return 0;
}
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct sha1_state istate, ostate;
int ret, i;
ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(istate.state); i++)
ctx->iv[i] = cpu_to_be32(istate.state[i]);
for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
return 0;
}
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
int ret;
ret = mv_cesa_ahmac_sha1_init(req);
if (ret)
return ret;
return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha1_alg = {
.init = mv_cesa_ahmac_sha1_init,
.update = mv_cesa_ahash_update,
.final = mv_cesa_ahash_final,
.finup = mv_cesa_ahash_finup,
.digest = mv_cesa_ahmac_sha1_digest,
.setkey = mv_cesa_ahmac_sha1_setkey,
.export = mv_cesa_sha1_export,
.import = mv_cesa_sha1_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "mv-hmac-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
}
}
};
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct sha256_state istate, ostate;
int ret, i;
ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(istate.state); i++)
ctx->iv[i] = cpu_to_be32(istate.state[i]);
for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
return 0;
}
static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
mv_cesa_ahash_init(req, &tmpl, false);
return 0;
}
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
int ret;
ret = mv_cesa_ahmac_sha256_init(req);
if (ret)
return ret;
return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha256_alg = {
.init = mv_cesa_ahmac_sha256_init,
.update = mv_cesa_ahash_update,
.final = mv_cesa_ahash_final,
.finup = mv_cesa_ahash_finup,
.digest = mv_cesa_ahmac_sha256_digest,
.setkey = mv_cesa_ahmac_sha256_setkey,
.export = mv_cesa_sha256_export,
.import = mv_cesa_sha256_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "mv-hmac-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
}
}
};
| linux-master | drivers/crypto/marvell/cesa/hash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Provide TDMA helper functions used by cipher and hash algorithm
* implementations.
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
* Author: Arnaud Ebalard <arno@natisbad.org>
*
* This work is based on an initial version written by
* Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
*/
#include "cesa.h"
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
struct mv_cesa_sg_dma_iter *sgiter,
unsigned int len)
{
if (!sgiter->sg)
return false;
sgiter->op_offset += len;
sgiter->offset += len;
if (sgiter->offset == sg_dma_len(sgiter->sg)) {
if (sg_is_last(sgiter->sg))
return false;
sgiter->offset = 0;
sgiter->sg = sg_next(sgiter->sg);
}
if (sgiter->op_offset == iter->op_len)
return false;
return true;
}
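/*
 * Configure the TDMA and crypto engine registers and start processing of the
 * first descriptor of the request chain.
 */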
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;
writel_relaxed(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
engine->regs + CESA_TDMA_CONTROL);
writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
engine->regs + CESA_SA_CFG);
writel_relaxed(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
WARN_ON(readl(engine->regs + CESA_SA_CMD) &
CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
struct mv_cesa_tdma_desc *tdma;
for (tdma = dreq->chain.first; tdma;) {
struct mv_cesa_tdma_desc *old_tdma = tdma;
u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
old_tdma->cur_dma);
}
dreq->chain.first = NULL;
dreq->chain.last = NULL;
}
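/*
 * Turn the SRAM-relative addresses stored in the TDMA descriptors into
 * absolute addresses for the engine the request has been assigned to.
 */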
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
struct mv_cesa_engine *engine)
{
struct mv_cesa_tdma_desc *tdma;
for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
tdma->dst = cpu_to_le32(tdma->dst_dma + engine->sram_dma);
if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);
if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
mv_cesa_adjust_op(engine, tdma->op);
}
}
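/*
 * Append the request descriptor chain to the engine chain, linking the
 * hardware descriptors unless a chain break or an IV setup is required.
 */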
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
if (engine->chain.first == NULL && engine->chain.last == NULL) {
engine->chain.first = dreq->chain.first;
engine->chain.last = dreq->chain.last;
} else {
struct mv_cesa_tdma_desc *last;
last = engine->chain.last;
last->next = dreq->chain.first;
engine->chain.last = dreq->chain.last;
/*
* Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
* the last element of the current chain, or if the request
 * being queued needs the IV regs to be set before launching
* the request.
*/
if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
!(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
}
}
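/*
 * Walk the engine descriptor chain and complete every request whose
 * CESA_TDMA_END_OF_REQ descriptor has been handled by the hardware.
 */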
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
struct crypto_async_request *req = NULL;
struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
dma_addr_t tdma_cur;
int res = 0;
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
for (tdma = engine->chain.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);
if (tdma->flags & CESA_TDMA_END_OF_REQ) {
struct crypto_async_request *backlog = NULL;
struct mv_cesa_ctx *ctx;
u32 current_status;
spin_lock_bh(&engine->lock);
/*
* if req is NULL, this means we're processing the
* request in engine->req.
*/
if (!req)
req = engine->req;
else
req = mv_cesa_dequeue_req_locked(engine,
&backlog);
/* Re-chaining to the next request */
engine->chain.first = tdma->next;
tdma->next = NULL;
/* If this is the last request, clear the chain */
if (engine->chain.first == NULL)
engine->chain.last = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
current_status = (tdma->cur_dma == tdma_cur) ?
status : CESA_SA_INT_ACC0_IDMA_DONE;
res = ctx->ops->process(req, current_status);
ctx->ops->complete(req);
if (res == 0)
mv_cesa_engine_enqueue_complete_request(engine,
req);
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
}
if (res || tdma->cur_dma == tdma_cur)
break;
}
/*
* Save the last request in error to engine->req, so that the core
* knows which request was faulty
*/
if (res) {
spin_lock_bh(&engine->lock);
engine->req = req;
spin_unlock_bh(&engine->lock);
}
return res;
}
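/* Allocate a TDMA descriptor from the DMA pool and append it to @chain. */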
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
struct mv_cesa_tdma_desc *new_tdma = NULL;
dma_addr_t dma_handle;
new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
&dma_handle);
if (!new_tdma)
return ERR_PTR(-ENOMEM);
new_tdma->cur_dma = dma_handle;
if (chain->last) {
chain->last->next_dma = cpu_to_le32(dma_handle);
chain->last->next = new_tdma;
} else {
chain->first = new_tdma;
}
chain->last = new_tdma;
return new_tdma;
}
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
/* We re-use an existing op_desc object to retrieve the context
* and result instead of allocating a new one.
* There is at least one object of this type in a CESA crypto
* req, just pick the first one in the chain.
*/
for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
if (type == CESA_TDMA_OP)
break;
}
if (!op_desc)
return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src_dma = src;
tdma->dst_dma = op_desc->src_dma;
tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
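/*
 * Add an operation descriptor copying @op_templ (or only its descriptor part
 * when @skip_ctx is true) into the engine SRAM.
 */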
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
gfp_t flags)
{
struct mv_cesa_tdma_desc *tdma;
struct mv_cesa_op_ctx *op;
dma_addr_t dma_handle;
unsigned int size;
tdma = mv_cesa_dma_add_desc(chain, flags);
if (IS_ERR(tdma))
return ERR_CAST(tdma);
op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
if (!op)
return ERR_PTR(-ENOMEM);
*op = *op_templ;
size = skip_ctx ? sizeof(op->desc) : sizeof(*op);
tdma = chain->last;
tdma->op = op;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = cpu_to_le32(dma_handle);
tdma->dst_dma = CESA_SA_CFG_SRAM_OFFSET;
tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
return op;
}
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
dma_addr_t dst, dma_addr_t src, u32 size,
u32 flags, gfp_t gfp_flags)
{
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src_dma = src;
tdma->dst_dma = dst;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
tdma->flags = flags | CESA_TDMA_DATA;
return 0;
}
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
return PTR_ERR_OR_ZERO(tdma);
}
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
tdma->byte_cnt = cpu_to_le32(BIT(31));
return 0;
}
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_dma_iter *dma_iter,
struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags)
{
u32 flags = sgiter->dir == DMA_TO_DEVICE ?
CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
unsigned int len;
do {
dma_addr_t dst, src;
int ret;
len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
if (sgiter->dir == DMA_TO_DEVICE) {
dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
src = sg_dma_address(sgiter->sg) + sgiter->offset;
} else {
dst = sg_dma_address(sgiter->sg) + sgiter->offset;
src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
}
ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
flags, gfp_flags);
if (ret)
return ret;
} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));
return 0;
}
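/*
 * Copy up to @buflen bytes between a scatterlist and the engine SRAM (or its
 * backing pool), skipping the first @skip bytes of the scatterlist. Returns
 * the number of bytes actually copied.
 */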
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
struct scatterlist *sgl, unsigned int nents,
unsigned int sram_off, size_t buflen, off_t skip,
bool to_sram)
{
unsigned int sg_flags = SG_MITER_ATOMIC;
struct sg_mapping_iter miter;
unsigned int offset = 0;
if (to_sram)
sg_flags |= SG_MITER_FROM_SG;
else
sg_flags |= SG_MITER_TO_SG;
sg_miter_start(&miter, sgl, nents, sg_flags);
if (!sg_miter_skip(&miter, skip))
return 0;
while ((offset < buflen) && sg_miter_next(&miter)) {
unsigned int len;
len = min(miter.length, buflen - offset);
if (to_sram) {
if (engine->pool)
memcpy(engine->sram_pool + sram_off + offset,
miter.addr, len);
else
memcpy_toio(engine->sram + sram_off + offset,
miter.addr, len);
} else {
if (engine->pool)
memcpy(miter.addr,
engine->sram_pool + sram_off + offset,
len);
else
memcpy_fromio(miter.addr,
engine->sram + sram_off + offset,
len);
}
offset += len;
}
sg_miter_stop(&miter);
return offset;
}
| linux-master | drivers/crypto/marvell/cesa/tdma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
* driver supports the TDMA engine on platforms on which it is available.
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
* Author: Arnaud Ebalard <arno@natisbad.org>
*
* This work is based on an initial version written by
* Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include "cesa.h"
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
struct mv_cesa_dev *cesa_dev;
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
struct crypto_async_request **backlog)
{
struct crypto_async_request *req;
*backlog = crypto_get_backlog(&engine->queue);
req = crypto_dequeue_request(&engine->queue);
if (!req)
return NULL;
return req;
}
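/* Dequeue the next pending request, if any, and start it on the engine. */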
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
struct crypto_async_request *req = NULL, *backlog = NULL;
struct mv_cesa_ctx *ctx;
spin_lock_bh(&engine->lock);
if (!engine->req) {
req = mv_cesa_dequeue_req_locked(engine, &backlog);
engine->req = req;
}
spin_unlock_bh(&engine->lock);
if (!req)
return;
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
}
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
struct crypto_async_request *req;
struct mv_cesa_ctx *ctx;
int res;
req = engine->req;
ctx = crypto_tfm_ctx(req->tfm);
res = ctx->ops->process(req, status);
if (res == 0) {
ctx->ops->complete(req);
mv_cesa_engine_enqueue_complete_request(engine, req);
} else if (res == -EINPROGRESS) {
ctx->ops->step(req);
}
return res;
}
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
if (engine->chain.first && engine->chain.last)
return mv_cesa_tdma_process(engine, status);
return mv_cesa_std_process(engine, status);
}
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
int res)
{
ctx->ops->cleanup(req);
local_bh_disable();
crypto_request_complete(req, res);
local_bh_enable();
}
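/* Interrupt handler: process completed requests and launch the pending ones. */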
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
struct mv_cesa_engine *engine = priv;
struct crypto_async_request *req;
struct mv_cesa_ctx *ctx;
u32 status, mask;
irqreturn_t ret = IRQ_NONE;
while (true) {
int res;
mask = mv_cesa_get_int_mask(engine);
status = readl(engine->regs + CESA_SA_INT_STATUS);
if (!(status & mask))
break;
/*
 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
* relevant on some platforms.
*/
writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
writel(~status, engine->regs + CESA_SA_INT_STATUS);
/* Process fetched requests */
res = mv_cesa_int_process(engine, status & mask);
ret = IRQ_HANDLED;
spin_lock_bh(&engine->lock);
req = engine->req;
if (res != -EINPROGRESS)
engine->req = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
if (res && res != -EINPROGRESS)
mv_cesa_complete_req(ctx, req, res);
/* Launch the next pending request */
mv_cesa_rearm_engine(engine);
/* Iterate over the complete queue */
while (true) {
req = mv_cesa_engine_dequeue_complete_request(engine);
if (!req)
break;
ctx = crypto_tfm_ctx(req->tfm);
mv_cesa_complete_req(ctx, req, 0);
}
}
return ret;
}
int mv_cesa_queue_req(struct crypto_async_request *req,
struct mv_cesa_req *creq)
{
int ret;
struct mv_cesa_engine *engine = creq->engine;
spin_lock_bh(&engine->lock);
ret = crypto_enqueue_request(&engine->queue, req);
if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
(ret == -EINPROGRESS || ret == -EBUSY))
mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
if (ret != -EINPROGRESS)
return ret;
mv_cesa_rearm_engine(engine);
return -EINPROGRESS;
}
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
int ret;
int i, j;
for (i = 0; i < cesa->caps->ncipher_algs; i++) {
ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
if (ret)
goto err_unregister_crypto;
}
for (i = 0; i < cesa->caps->nahash_algs; i++) {
ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
if (ret)
goto err_unregister_ahash;
}
return 0;
err_unregister_ahash:
for (j = 0; j < i; j++)
crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
i = cesa->caps->ncipher_algs;
err_unregister_crypto:
for (j = 0; j < i; j++)
crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
return ret;
}
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
int i;
for (i = 0; i < cesa->caps->nahash_algs; i++)
crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
for (i = 0; i < cesa->caps->ncipher_algs; i++)
crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
static struct skcipher_alg *orion_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
&mv_cesa_cbc_des3_ede_alg,
&mv_cesa_ecb_aes_alg,
&mv_cesa_cbc_aes_alg,
};
static struct ahash_alg *orion_ahash_algs[] = {
&mv_md5_alg,
&mv_sha1_alg,
&mv_ahmac_md5_alg,
&mv_ahmac_sha1_alg,
};
static struct skcipher_alg *armada_370_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
&mv_cesa_cbc_des3_ede_alg,
&mv_cesa_ecb_aes_alg,
&mv_cesa_cbc_aes_alg,
};
static struct ahash_alg *armada_370_ahash_algs[] = {
&mv_md5_alg,
&mv_sha1_alg,
&mv_sha256_alg,
&mv_ahmac_md5_alg,
&mv_ahmac_sha1_alg,
&mv_ahmac_sha256_alg,
};
static const struct mv_cesa_caps orion_caps = {
.nengines = 1,
.cipher_algs = orion_cipher_algs,
.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
.ahash_algs = orion_ahash_algs,
.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
.has_tdma = false,
};
static const struct mv_cesa_caps kirkwood_caps = {
.nengines = 1,
.cipher_algs = orion_cipher_algs,
.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
.ahash_algs = orion_ahash_algs,
.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
.has_tdma = true,
};
static const struct mv_cesa_caps armada_370_caps = {
.nengines = 1,
.cipher_algs = armada_370_cipher_algs,
.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
.ahash_algs = armada_370_ahash_algs,
.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
.has_tdma = true,
};
static const struct mv_cesa_caps armada_xp_caps = {
.nengines = 2,
.cipher_algs = armada_370_cipher_algs,
.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
.ahash_algs = armada_370_ahash_algs,
.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
.has_tdma = true,
};
static const struct of_device_id mv_cesa_of_match_table[] = {
{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
const struct mbus_dram_target_info *dram)
{
void __iomem *iobase = engine->regs;
int i;
for (i = 0; i < 4; i++) {
writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
iobase + CESA_TDMA_WINDOW_CTRL(i));
writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
}
}
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
struct device *dev = cesa->dev;
struct mv_cesa_dev_dma *dma;
if (!cesa->caps->has_tdma)
return 0;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
return -ENOMEM;
dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
sizeof(struct mv_cesa_tdma_desc),
16, 0);
if (!dma->tdma_desc_pool)
return -ENOMEM;
dma->op_pool = dmam_pool_create("cesa_op", dev,
sizeof(struct mv_cesa_op_ctx), 16, 0);
if (!dma->op_pool)
return -ENOMEM;
dma->cache_pool = dmam_pool_create("cesa_cache", dev,
CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
if (!dma->cache_pool)
return -ENOMEM;
dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
if (!dma->padding_pool)
return -ENOMEM;
cesa->dma = dma;
return 0;
}
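/*
 * Reserve the SRAM attached to engine @idx, either from the genalloc pool
 * referenced by "marvell,crypto-srams" or from a dedicated MMIO resource.
 */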
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
const char *res_name = "sram";
struct resource *res;
engine->pool = of_gen_pool_get(cesa->dev->of_node,
"marvell,crypto-srams", idx);
if (engine->pool) {
engine->sram_pool = gen_pool_dma_alloc(engine->pool,
cesa->sram_size,
&engine->sram_dma);
if (engine->sram_pool)
return 0;
engine->pool = NULL;
return -ENOMEM;
}
if (cesa->caps->nengines > 1) {
if (!idx)
res_name = "sram0";
else
res_name = "sram1";
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
res_name);
if (!res || resource_size(res) < cesa->sram_size)
return -EINVAL;
engine->sram = devm_ioremap_resource(cesa->dev, res);
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
engine->sram_dma = dma_map_resource(cesa->dev, res->start,
cesa->sram_size,
DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(cesa->dev, engine->sram_dma))
return -ENOMEM;
return 0;
}
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
if (engine->pool)
gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
cesa->sram_size);
else
dma_unmap_resource(cesa->dev, engine->sram_dma,
cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
{
const struct mv_cesa_caps *caps = &orion_caps;
const struct mbus_dram_target_info *dram;
const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
int irq, ret, i, cpu;
u32 sram_size;
if (cesa_dev) {
dev_err(&pdev->dev, "Only one CESA device authorized\n");
return -EEXIST;
}
if (dev->of_node) {
match = of_match_node(mv_cesa_of_match_table, dev->of_node);
if (!match || !match->data)
return -ENOTSUPP;
caps = match->data;
}
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
if (!cesa)
return -ENOMEM;
cesa->caps = caps;
cesa->dev = dev;
sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
&sram_size);
if (sram_size < CESA_SA_MIN_SRAM_SIZE)
sram_size = CESA_SA_MIN_SRAM_SIZE;
cesa->sram_size = sram_size;
cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
GFP_KERNEL);
if (!cesa->engines)
return -ENOMEM;
spin_lock_init(&cesa->lock);
cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(cesa->regs))
return PTR_ERR(cesa->regs);
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
return ret;
dram = mv_mbus_dram_info_nooverlap();
platform_set_drvdata(pdev, cesa);
for (i = 0; i < caps->nengines; i++) {
struct mv_cesa_engine *engine = &cesa->engines[i];
char res_name[7];
engine->id = i;
spin_lock_init(&engine->lock);
ret = mv_cesa_get_sram(pdev, i);
if (ret)
goto err_cleanup;
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
goto err_cleanup;
}
engine->irq = irq;
/*
* Not all platforms can gate the CESA clocks: do not complain
* if the clock does not exist.
*/
snprintf(res_name, sizeof(res_name), "cesa%d", i);
engine->clk = devm_clk_get(dev, res_name);
if (IS_ERR(engine->clk)) {
engine->clk = devm_clk_get(dev, NULL);
if (IS_ERR(engine->clk))
engine->clk = NULL;
}
snprintf(res_name, sizeof(res_name), "cesaz%d", i);
engine->zclk = devm_clk_get(dev, res_name);
if (IS_ERR(engine->zclk))
engine->zclk = NULL;
ret = clk_prepare_enable(engine->clk);
if (ret)
goto err_cleanup;
ret = clk_prepare_enable(engine->zclk);
if (ret)
goto err_cleanup;
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
if (dram && cesa->caps->has_tdma)
mv_cesa_conf_mbus_windows(engine, dram);
writel(0, engine->regs + CESA_SA_INT_STATUS);
writel(CESA_SA_CFG_STOP_DIG_ERR,
engine->regs + CESA_SA_CFG);
writel(engine->sram_dma & CESA_SA_SRAM_MSK,
engine->regs + CESA_SA_DESC_P0);
ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
IRQF_ONESHOT,
dev_name(&pdev->dev),
engine);
if (ret)
goto err_cleanup;
/* Set affinity */
cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
atomic_set(&engine->load, 0);
INIT_LIST_HEAD(&engine->complete_queue);
}
cesa_dev = cesa;
ret = mv_cesa_add_algs(cesa);
if (ret) {
cesa_dev = NULL;
goto err_cleanup;
}
dev_info(dev, "CESA device successfully registered\n");
return 0;
err_cleanup:
for (i = 0; i < caps->nengines; i++) {
clk_disable_unprepare(cesa->engines[i].zclk);
clk_disable_unprepare(cesa->engines[i].clk);
mv_cesa_put_sram(pdev, i);
if (cesa->engines[i].irq > 0)
irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
return ret;
}
static int mv_cesa_remove(struct platform_device *pdev)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
int i;
mv_cesa_remove_algs(cesa);
for (i = 0; i < cesa->caps->nengines; i++) {
clk_disable_unprepare(cesa->engines[i].zclk);
clk_disable_unprepare(cesa->engines[i].clk);
mv_cesa_put_sram(pdev, i);
irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
return 0;
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
{ .name = "mv_crypto" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
},
};
module_platform_driver(marvell_cesa);
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/crypto/marvell/cesa/cesa.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cipher algorithms supported by the CESA: DES, 3DES and AES.
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
* Author: Arnaud Ebalard <arno@natisbad.org>
*
* This work is based on an initial version written by
* Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
*/
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include "cesa.h"
struct mv_cesa_des_ctx {
struct mv_cesa_ctx base;
u8 key[DES_KEY_SIZE];
};
struct mv_cesa_des3_ctx {
struct mv_cesa_ctx base;
u8 key[DES3_EDE_KEY_SIZE];
};
struct mv_cesa_aes_ctx {
struct mv_cesa_ctx base;
struct crypto_aes_ctx aes;
};
struct mv_cesa_skcipher_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
struct mv_cesa_sg_dma_iter dst;
};
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
struct skcipher_request *req)
{
mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}
static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
iter->src.op_offset = 0;
iter->dst.op_offset = 0;
return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (req->dst != req->src) {
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
} else {
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_BIDIRECTIONAL);
}
mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_cleanup(req);
}
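/*
 * Standard (non-TDMA) step: copy the operation context and the next chunk of
 * input data into the engine SRAM, then start the accelerator.
 */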
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len = min_t(size_t, req->cryptlen - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
if (engine->pool)
memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
CESA_SA_DATA_SRAM_OFFSET, len,
sreq->offset);
sreq->size = len;
mv_cesa_set_crypt_op_len(&sreq->op, len);
/* FIXME: only update enc_len field */
if (!sreq->skip_ctx) {
if (engine->pool)
memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
sreq->skip_ctx = true;
} else if (engine->pool)
memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
WARN_ON(readl(engine->regs + CESA_SA_CMD) &
CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
u32 status)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
CESA_SA_DATA_SRAM_OFFSET, sreq->size,
sreq->offset);
sreq->offset += len;
if (sreq->offset < req->cryptlen)
return -EINPROGRESS;
return 0;
}
static int mv_cesa_skcipher_process(struct crypto_async_request *req,
u32 status)
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_req *basereq = &creq->base;
if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
return mv_cesa_skcipher_std_process(skreq, status);
return mv_cesa_dma_process(basereq, status);
}
static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_dma_step(&creq->base);
else
mv_cesa_skcipher_std_step(skreq);
}
static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_req *basereq = &creq->base;
mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
sreq->size = 0;
sreq->offset = 0;
}
static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
struct mv_cesa_engine *engine)
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_prepare(skreq);
else
mv_cesa_skcipher_std_prepare(skreq);
}
static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
struct skcipher_request *skreq = skcipher_request_cast(req);
mv_cesa_skcipher_cleanup(skreq);
}
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
struct mv_cesa_req *basereq;
basereq = &creq->base;
memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
} else if (engine->pool)
memcpy(skreq->iv,
engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
else
memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
}
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
.step = mv_cesa_skcipher_step,
.process = mv_cesa_skcipher_process,
.cleanup = mv_cesa_skcipher_req_cleanup,
.complete = mv_cesa_skcipher_complete,
};
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
void *ctx = crypto_tfm_ctx(tfm);
memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}
static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->ops = &mv_cesa_skcipher_req_ops;
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct mv_cesa_skcipher_req));
return 0;
}
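/*
 * Expand the AES key and rebuild the remaining words of the decryption key
 * from the tail of the encryption key schedule, as expected by the engine.
 */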
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int remaining;
int offset;
int ret;
int i;
ret = aes_expandkey(&ctx->aes, key, len);
if (ret)
return ret;
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
for (i = 0; i < remaining; i++)
ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];
return 0;
}
static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
memcpy(ctx->key, key, DES_KEY_SIZE);
return 0;
}
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
return 0;
}
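/*
 * Build the TDMA descriptor chain for a skcipher request: map the source and
 * destination scatterlists, then, for each SRAM-sized chunk, add the
 * operation context, the input transfers, a launch descriptor and the output
 * transfers, and finish with a result descriptor fetching the updated IV.
 */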
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_skcipher_dma_iter iter;
bool skip_ctx = false;
int ret;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
if (req->src != req->dst) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
if (!ret)
return -ENOMEM;
ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
if (!ret) {
ret = -ENOMEM;
goto err_unmap_src;
}
} else {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_BIDIRECTIONAL);
if (!ret)
return -ENOMEM;
}
mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_skcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
}
skip_ctx = true;
mv_cesa_set_crypt_op_len(op, iter.base.op_len);
/* Add input transfers */
ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.src, flags);
if (ret)
goto err_free_tdma;
/* Add dummy desc to launch the crypto operation */
ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
/* Add output transfers */
ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.dst, flags);
if (ret)
goto err_free_tdma;
} while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
ret = mv_cesa_dma_add_result_op(&basereq->chain,
CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
return 0;
err_free_tdma:
mv_cesa_dma_cleanup(basereq);
if (req->dst != req->src)
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
err_unmap_src:
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
return ret;
}
static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_req *basereq = &creq->base;
sreq->op = *op_templ;
sreq->skip_ctx = false;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
return 0;
}
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int blksize = crypto_skcipher_blocksize(tfm);
int ret;
if (!IS_ALIGNED(req->cryptlen, blksize))
return -EINVAL;
creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
}
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
CESA_SA_DESC_CFG_OP_MSK);
if (cesa_dev->caps->has_tdma)
ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
else
ret = mv_cesa_skcipher_std_req_init(req, tmpl);
return ret;
}
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
int ret;
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
engine = mv_cesa_select_engine(req->cryptlen);
mv_cesa_skcipher_prepare(&req->base, engine);
ret = mv_cesa_queue_req(&req->base, &creq->base);
if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_skcipher_cleanup(req);
return ret;
}
static int mv_cesa_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
CESA_SA_DESC_CFG_DIR_ENC);
return mv_cesa_des_op(req, &tmpl);
}
static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
CESA_SA_DESC_CFG_DIR_DEC);
return mv_cesa_des_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_ecb_des_alg = {
.setkey = mv_cesa_des_setkey,
.encrypt = mv_cesa_ecb_des_encrypt,
.decrypt = mv_cesa_ecb_des_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "mv-ecb-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};
static int mv_cesa_cbc_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
return mv_cesa_cbc_des_op(req, &tmpl);
}
static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
return mv_cesa_cbc_des_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_cbc_des_alg = {
.setkey = mv_cesa_des_setkey,
.encrypt = mv_cesa_cbc_des_encrypt,
.decrypt = mv_cesa_cbc_des_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.base = {
.cra_name = "cbc(des)",
.cra_driver_name = "mv-cbc-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};
static int mv_cesa_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
CESA_SA_DESC_CFG_3DES_EDE |
CESA_SA_DESC_CFG_DIR_ENC);
return mv_cesa_des3_op(req, &tmpl);
}
static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
CESA_SA_DESC_CFG_3DES_EDE |
CESA_SA_DESC_CFG_DIR_DEC);
return mv_cesa_des3_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.setkey = mv_cesa_des3_ede_setkey,
.encrypt = mv_cesa_ecb_des3_ede_encrypt,
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
CESA_SA_DESC_CFG_3DES_EDE |
CESA_SA_DESC_CFG_DIR_ENC);
return mv_cesa_cbc_des3_op(req, &tmpl);
}
static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
CESA_SA_DESC_CFG_3DES_EDE |
CESA_SA_DESC_CFG_DIR_DEC);
return mv_cesa_cbc_des3_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
.setkey = mv_cesa_des3_ede_setkey,
.encrypt = mv_cesa_cbc_des3_ede_encrypt,
.decrypt = mv_cesa_cbc_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "mv-cbc-des3-ede",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};
static int mv_cesa_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
int i;
u32 *key;
u32 cfg;
cfg = CESA_SA_DESC_CFG_CRYPTM_AES;
if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
key = ctx->aes.key_dec;
else
key = ctx->aes.key_enc;
for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);
if (ctx->aes.key_length == 24)
cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
else if (ctx->aes.key_length == 32)
cfg |= CESA_SA_DESC_CFG_AES_LEN_256;
mv_cesa_update_op_cfg(tmpl, cfg,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
CESA_SA_DESC_CFG_DIR_ENC);
return mv_cesa_aes_op(req, &tmpl);
}
static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
CESA_SA_DESC_CFG_DIR_DEC);
return mv_cesa_aes_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_ecb_aes_alg = {
.setkey = mv_cesa_aes_setkey,
.encrypt = mv_cesa_ecb_aes_encrypt,
.decrypt = mv_cesa_ecb_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};
static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
return mv_cesa_cbc_aes_op(req, &tmpl);
}
static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
return mv_cesa_cbc_aes_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_cbc_aes_alg = {
.setkey = mv_cesa_aes_setkey,
.encrypt = mv_cesa_cbc_aes_encrypt,
.decrypt = mv_cesa_cbc_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};
| linux-master | drivers/crypto/marvell/cesa/cipher.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "otx_cpt_common.h"
#include "otx_cptpf.h"
#define DRV_NAME "octeontx-cpt"
#define DRV_VERSION "1.0"
static void otx_cpt_disable_mbox_interrupts(struct otx_cpt_device *cpt)
{
/* Disable mbox(0) interrupts for all VFs */
writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1CX(0));
}
static void otx_cpt_enable_mbox_interrupts(struct otx_cpt_device *cpt)
{
/* Enable mbox(0) interrupts for all VFs */
writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1SX(0));
}
static irqreturn_t otx_cpt_mbx0_intr_handler(int __always_unused irq,
void *cpt)
{
otx_cpt_mbox_intr_handler(cpt, 0);
return IRQ_HANDLED;
}
static void otx_cpt_reset(struct otx_cpt_device *cpt)
{
writeq(1, cpt->reg_base + OTX_CPT_PF_RESET);
}
static void otx_cpt_find_max_enabled_cores(struct otx_cpt_device *cpt)
{
union otx_cptx_pf_constants pf_cnsts = {0};
pf_cnsts.u = readq(cpt->reg_base + OTX_CPT_PF_CONSTANTS);
cpt->eng_grps.avail.max_se_cnt = pf_cnsts.s.se;
cpt->eng_grps.avail.max_ae_cnt = pf_cnsts.s.ae;
}
static u32 otx_cpt_check_bist_status(struct otx_cpt_device *cpt)
{
union otx_cptx_pf_bist_status bist_sts = {0};
bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_BIST_STATUS);
return bist_sts.u;
}
static u64 otx_cpt_check_exe_bist_status(struct otx_cpt_device *cpt)
{
union otx_cptx_pf_exe_bist_status bist_sts = {0};
bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_EXE_BIST_STATUS);
return bist_sts.u;
}
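/*
 * Basic PF initialization: reset the block, check the RAM and engine BIST
 * results, detect the PF type (SE or AE) and the maximum number of VFs, and
 * leave all cores disabled.
 */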
static int otx_cpt_device_init(struct otx_cpt_device *cpt)
{
struct device *dev = &cpt->pdev->dev;
u16 sdevid;
u64 bist;
/* Reset the PF when probed first */
otx_cpt_reset(cpt);
mdelay(100);
pci_read_config_word(cpt->pdev, PCI_SUBSYSTEM_ID, &sdevid);
/* Check BIST status */
bist = (u64)otx_cpt_check_bist_status(cpt);
if (bist) {
dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
bist = otx_cpt_check_exe_bist_status(cpt);
if (bist) {
dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
/* Get max enabled cores */
otx_cpt_find_max_enabled_cores(cpt);
if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
(cpt->eng_grps.avail.max_se_cnt == 0)) {
cpt->pf_type = OTX_CPT_AE;
} else if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
(cpt->eng_grps.avail.max_ae_cnt == 0)) {
cpt->pf_type = OTX_CPT_SE;
}
/* Get max VQs/VFs supported by the device */
cpt->max_vfs = pci_sriov_get_totalvfs(cpt->pdev);
/* Disable all cores */
otx_cpt_disable_all_cores(cpt);
return 0;
}
static int otx_cpt_register_interrupts(struct otx_cpt_device *cpt)
{
struct device *dev = &cpt->pdev->dev;
u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
u32 num_vec = OTX_CPT_PF_MSIX_VECTORS;
int ret;
/* Enable MSI-X */
ret = pci_alloc_irq_vectors(cpt->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&cpt->pdev->dev,
"Request for #%d msix vectors failed\n",
num_vec);
return ret;
}
/* Register mailbox interrupt handlers */
ret = request_irq(pci_irq_vector(cpt->pdev,
OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
otx_cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
if (ret) {
dev_err(dev, "Request irq failed\n");
pci_free_irq_vectors(cpt->pdev);
return ret;
}
/* Enable mailbox interrupt */
otx_cpt_enable_mbox_interrupts(cpt);
return 0;
}
static void otx_cpt_unregister_interrupts(struct otx_cpt_device *cpt)
{
u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
otx_cpt_disable_mbox_interrupts(cpt);
free_irq(pci_irq_vector(cpt->pdev,
OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
cpt);
pci_free_irq_vectors(cpt->pdev);
}
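/*
 * SR-IOV configuration callback: create the default engine groups and enable
 * up to @numvfs VFs, or disable SR-IOV when @numvfs is zero.
 */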
static int otx_cpt_sriov_configure(struct pci_dev *pdev, int numvfs)
{
struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
int ret = 0;
if (numvfs > cpt->max_vfs)
numvfs = cpt->max_vfs;
if (numvfs > 0) {
ret = otx_cpt_try_create_default_eng_grps(cpt->pdev,
&cpt->eng_grps,
cpt->pf_type);
if (ret)
return ret;
cpt->vfs_enabled = numvfs;
ret = pci_enable_sriov(pdev, numvfs);
if (ret) {
cpt->vfs_enabled = 0;
return ret;
}
otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, true);
try_module_get(THIS_MODULE);
ret = numvfs;
} else {
pci_disable_sriov(pdev);
otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, false);
module_put(THIS_MODULE);
cpt->vfs_enabled = 0;
}
dev_notice(&cpt->pdev->dev, "VFs enabled: %d\n", ret);
return ret;
}
static int otx_cpt_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
struct otx_cpt_device *cpt;
int err;
cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
if (!cpt)
return -ENOMEM;
pci_set_drvdata(pdev, cpt);
cpt->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
goto err_clear_drvdata;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
goto err_disable_device;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
goto err_release_regions;
}
/* MAP PF's configuration registers */
cpt->reg_base = pci_iomap(pdev, OTX_CPT_PF_PCI_CFG_BAR, 0);
if (!cpt->reg_base) {
dev_err(dev, "Cannot map config register space, aborting\n");
err = -ENOMEM;
goto err_release_regions;
}
/* CPT device HW initialization */
err = otx_cpt_device_init(cpt);
if (err)
goto err_unmap_region;
/* Register interrupts */
err = otx_cpt_register_interrupts(cpt);
if (err)
goto err_unmap_region;
/* Initialize engine groups */
err = otx_cpt_init_eng_grps(pdev, &cpt->eng_grps, cpt->pf_type);
if (err)
goto err_unregister_interrupts;
return 0;
err_unregister_interrupts:
otx_cpt_unregister_interrupts(cpt);
err_unmap_region:
pci_iounmap(pdev, cpt->reg_base);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
err_clear_drvdata:
pci_set_drvdata(pdev, NULL);
return err;
}
static void otx_cpt_remove(struct pci_dev *pdev)
{
struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
if (!cpt)
return;
/* Disable VFs */
pci_disable_sriov(pdev);
/* Cleanup engine groups */
otx_cpt_cleanup_eng_grps(pdev, &cpt->eng_grps);
/* Disable CPT PF interrupts */
otx_cpt_unregister_interrupts(cpt);
/* Disengage SE and AE cores from all groups */
otx_cpt_disable_all_cores(cpt);
pci_iounmap(pdev, cpt->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
/* Supported devices */
static const struct pci_device_id otx_cpt_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX_CPT_PCI_PF_DEVICE_ID) },
{ 0, } /* end of table */
};
static struct pci_driver otx_cpt_pci_driver = {
.name = DRV_NAME,
.id_table = otx_cpt_id_table,
.probe = otx_cpt_probe,
.remove = otx_cpt_remove,
.sriov_configure = otx_cpt_sriov_configure
};
module_pci_driver(otx_cpt_pci_driver);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cpt_id_table);
| linux-master | drivers/crypto/marvell/octeontx/otx_cptpf_main.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"
#define CSR_DELAY 30
/* Tar archive defines */
#define TAR_MAGIC "ustar"
#define TAR_MAGIC_LEN 6
#define TAR_BLOCK_LEN 512
#define REGTYPE '0'
#define AREGTYPE '\0'
/* tar header as defined in POSIX 1003.1-1990. */
struct tar_hdr_t {
char name[100];
char mode[8];
char uid[8];
char gid[8];
char size[12];
char mtime[12];
char chksum[8];
char typeflag;
char linkname[100];
char magic[6];
char version[2];
char uname[32];
char gname[32];
char devmajor[8];
char devminor[8];
char prefix[155];
};
struct tar_blk_t {
union {
struct tar_hdr_t hdr;
char block[TAR_BLOCK_LEN];
};
};
struct tar_arch_info_t {
struct list_head ucodes;
const struct firmware *fw;
};
static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp)
{
struct otx_cpt_bitmap bmap = { {0} };
bool found = false;
int i;
if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx\n",
eng_grp->g->engs_num);
return bmap;
}
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
if (eng_grp->engs[i].type) {
bitmap_or(bmap.bits, bmap.bits,
eng_grp->engs[i].bmap,
eng_grp->g->engs_num);
bmap.size = eng_grp->g->engs_num;
found = true;
}
}
if (!found)
dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx);
return bmap;
}
static int is_eng_type(int val, int eng_type)
{
return val & (1 << eng_type);
}
static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
int eng_type)
{
return is_eng_type(eng_grps->eng_types_supported, eng_type);
}
static void set_ucode_filename(struct otx_cpt_ucode *ucode,
const char *filename)
{
strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
{
char *str = "unknown";
switch (eng_type) {
case OTX_CPT_SE_TYPES:
str = "SE";
break;
case OTX_CPT_AE_TYPES:
str = "AE";
break;
}
return str;
}
static char *get_ucode_type_str(int ucode_type)
{
char *str = "unknown";
switch (ucode_type) {
case (1 << OTX_CPT_SE_TYPES):
str = "SE";
break;
case (1 << OTX_CPT_AE_TYPES):
str = "AE";
break;
}
return str;
}
static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
{
char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
u32 i, val = 0;
u8 nn;
strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
nn = ucode_hdr->ver_num.nn;
if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
(nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
nn == OTX_CPT_SE_UC_TYPE3))
val |= 1 << OTX_CPT_SE_TYPES;
if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
nn == OTX_CPT_AE_UC_TYPE)
val |= 1 << OTX_CPT_AE_TYPES;
*ucode_type = val;
if (!val)
return -EINVAL;
if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
is_eng_type(val, OTX_CPT_SE_TYPES))
return -EINVAL;
return 0;
}
static int is_mem_zero(const char *ptr, int size)
{
int i;
for (i = 0; i < size; i++) {
if (ptr[i])
return 0;
}
return 1;
}
static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
dma_addr_t dma_addr;
struct otx_cpt_bitmap bmap;
int i;
bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
if (eng_grp->mirror.is_ena)
dma_addr =
eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
else
dma_addr = eng_grp->ucode[0].align_dma;
/*
* Set UCODE_BASE only for the cores which are not used,
 * other cores should already have a valid UCODE_BASE set
*/
for_each_set_bit(i, bmap.bits, bmap.size)
if (!eng_grp->g->eng_ref_cnt[i])
writeq((u64) dma_addr, cpt->reg_base +
OTX_CPT_PF_ENGX_UCODE_BASE(i));
return 0;
}
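/*
 * Detach the group's cores from the engine group, wait for them to become
 * idle and disable the ones that are no longer referenced.
 */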
static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
void *obj)
{
struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
struct otx_cpt_bitmap bmap = { {0} };
int timeout = 10;
int i, busy;
u64 reg;
bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
/* Detach the cores from group */
reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
for_each_set_bit(i, bmap.bits, bmap.size) {
if (reg & (1ull << i)) {
eng_grp->g->eng_ref_cnt[i]--;
reg &= ~(1ull << i);
}
}
writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
/* Wait for cores to become idle */
do {
busy = 0;
usleep_range(10000, 20000);
if (timeout-- < 0)
return -EBUSY;
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
for_each_set_bit(i, bmap.bits, bmap.size)
if (reg & (1ull << i)) {
busy = 1;
break;
}
} while (busy);
/* Disable the cores only if they are not used anymore */
reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
for_each_set_bit(i, bmap.bits, bmap.size)
if (!eng_grp->g->eng_ref_cnt[i])
reg &= ~(1ull << i);
writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
return 0;
}
static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
void *obj)
{
struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
struct otx_cpt_bitmap bmap;
u64 reg;
int i;
bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
/* Attach the cores to the group */
reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
for_each_set_bit(i, bmap.bits, bmap.size) {
if (!(reg & (1ull << i))) {
eng_grp->g->eng_ref_cnt[i]++;
reg |= 1ull << i;
}
}
writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
/* Enable the cores */
reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
for_each_set_bit(i, bmap.bits, bmap.size)
reg |= 1ull << i;
writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
return 0;
}
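/*
 * Parse one file extracted from the firmware tar archive. Files that are
 * too small or do not carry a recognisable microcode header are silently
 * skipped; valid microcode images are appended to the archive's ucode list.
 */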
static int process_tar_file(struct device *dev,
struct tar_arch_info_t *tar_arch, char *filename,
const u8 *data, u32 size)
{
struct tar_ucode_info_t *tar_info;
struct otx_cpt_ucode_hdr *ucode_hdr;
int ucode_type, ucode_size;
unsigned int code_length;
/*
 * If the size is less than the microcode header size then don't report
 * an error because it might not be a microcode file; just process the
 * next file from the archive.
*/
if (size < sizeof(struct otx_cpt_ucode_hdr))
return 0;
ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
/*
 * If the microcode version can't be found don't report an error
 * because it might not be a microcode file; just process the next file.
*/
if (get_ucode_type(ucode_hdr, &ucode_type))
return 0;
code_length = ntohl(ucode_hdr->code_length);
if (code_length >= INT_MAX / 2) {
dev_err(dev, "Invalid code_length %u\n", code_length);
return -EINVAL;
}
ucode_size = code_length * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", filename);
return -EINVAL;
}
tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
if (!tar_info)
return -ENOMEM;
tar_info->ucode_ptr = data;
set_ucode_filename(&tar_info->ucode, filename);
memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
OTX_CPT_UCODE_VER_STR_SZ);
tar_info->ucode.ver_num = ucode_hdr->ver_num;
tar_info->ucode.type = ucode_type;
tar_info->ucode.size = ucode_size;
list_add_tail(&tar_info->list, &tar_arch->ucodes);
return 0;
}
static void release_tar_archive(struct tar_arch_info_t *tar_arch)
{
struct tar_ucode_info_t *curr, *temp;
if (!tar_arch)
return;
list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
list_del(&curr->list);
kfree(curr);
}
release_firmware(tar_arch->fw);
kfree(tar_arch);
}
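/*
 * Find a microcode image of the requested engine type in the tar archive.
 * When several SE images are present, TYPE1 microcode is preferred over
 * TYPE3 and TYPE2.
 */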
static struct tar_ucode_info_t *get_uc_from_tar_archive(
struct tar_arch_info_t *tar_arch,
int ucode_type)
{
struct tar_ucode_info_t *curr, *uc_found = NULL;
list_for_each_entry(curr, &tar_arch->ucodes, list) {
if (!is_eng_type(curr->ucode.type, ucode_type))
continue;
if (!uc_found) {
uc_found = curr;
continue;
}
switch (ucode_type) {
case OTX_CPT_AE_TYPES:
break;
case OTX_CPT_SE_TYPES:
if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
(uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
&& curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
uc_found = curr;
break;
}
}
return uc_found;
}
static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
char *tar_filename)
{
struct tar_ucode_info_t *curr;
pr_debug("Tar archive filename %s\n", tar_filename);
pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
tar_arch->fw->size);
list_for_each_entry(curr, &tar_arch->ucodes, list) {
pr_debug("Ucode filename %s\n", curr->ucode.filename);
pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
pr_debug("Ucode version %d.%d.%d.%d\n",
curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
get_ucode_type_str(curr->ucode.type));
pr_debug("Ucode size %d\n", curr->ucode.size);
pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
}
}
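/*
 * Load the firmware tar archive and walk its fixed-size blocks, collecting
 * every microcode image it contains. Returns NULL on any allocation,
 * parsing or validation error.
 */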
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
char *tar_filename)
{
struct tar_arch_info_t *tar_arch = NULL;
struct tar_blk_t *tar_blk;
unsigned int cur_size;
size_t tar_offs = 0;
size_t tar_size;
int ret;
tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
if (!tar_arch)
return NULL;
INIT_LIST_HEAD(&tar_arch->ucodes);
/* Load tar archive */
ret = request_firmware(&tar_arch->fw, tar_filename, dev);
if (ret)
goto release_tar_arch;
if (tar_arch->fw->size < TAR_BLOCK_LEN) {
dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
tar_size = tar_arch->fw->size;
tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
dev_err(dev, "Unsupported format of tar archive %s\n",
tar_filename);
goto release_tar_arch;
}
while (1) {
/* Read current file size */
ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
if (ret)
goto release_tar_arch;
if (tar_offs + cur_size > tar_size ||
tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
tar_offs += TAR_BLOCK_LEN;
if (tar_blk->hdr.typeflag == REGTYPE ||
tar_blk->hdr.typeflag == AREGTYPE) {
ret = process_tar_file(dev, tar_arch,
tar_blk->hdr.name,
&tar_arch->fw->data[tar_offs],
cur_size);
if (ret)
goto release_tar_arch;
}
tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
if (cur_size % TAR_BLOCK_LEN)
tar_offs += TAR_BLOCK_LEN;
/* Check for the end of the archive */
if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
if (is_mem_zero(&tar_arch->fw->data[tar_offs],
2*TAR_BLOCK_LEN))
break;
/* Read next block from tar archive */
tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
}
print_tar_dbg_info(tar_arch, tar_filename);
return tar_arch;
release_tar_arch:
release_tar_archive(tar_arch);
return NULL;
}
static struct otx_cpt_engs_rsvd *find_engines_by_type(
struct otx_cpt_eng_grp_info *eng_grp,
int eng_type)
{
int i;
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!eng_grp->engs[i].type)
continue;
if (eng_grp->engs[i].type == eng_type)
return &eng_grp->engs[i];
}
return NULL;
}
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
int eng_type)
{
struct otx_cpt_engs_rsvd *engs;
engs = find_engines_by_type(eng_grp, eng_type);
return (engs != NULL ? 1 : 0);
}
EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
char *buf, int size)
{
if (eng_grp->mirror.is_ena) {
scnprintf(buf, size, "%s (shared with engine_group%d)",
eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
eng_grp->mirror.idx);
} else {
scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
}
}
static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
char *buf, int size, int idx)
{
struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
struct otx_cpt_engs_rsvd *engs;
int len, i;
buf[0] = '\0';
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
if (idx != -1 && idx != i)
continue;
if (eng_grp->mirror.is_ena)
mirrored_engs = find_engines_by_type(
&eng_grp->g->grp[eng_grp->mirror.idx],
engs->type);
if (i > 0 && idx == -1) {
len = strlen(buf);
scnprintf(buf+len, size-len, ", ");
}
len = strlen(buf);
scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
engs->count + mirrored_engs->count : engs->count,
get_eng_type_str(engs->type));
if (mirrored_engs) {
len = strlen(buf);
scnprintf(buf+len, size-len,
"(%d shared with engine_group%d) ",
engs->count <= 0 ? engs->count +
mirrored_engs->count : mirrored_engs->count,
eng_grp->mirror.idx);
}
}
}
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
pr_debug("Ucode info\n");
pr_debug("Ucode version string %s\n", ucode->ver_str);
pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
pr_debug("Ucode size %d\n", ucode->size);
pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
struct device *dev, char *buf, int size)
{
struct otx_cpt_bitmap bmap;
u32 mask[2];
bmap = get_cores_bmap(dev, eng_grp);
if (!bmap.size) {
scnprintf(buf, size, "unknown");
return;
}
bitmap_to_arr32(mask, bmap.bits, bmap.size);
scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
}
static void print_dbg_info(struct device *dev,
struct otx_cpt_eng_grps *eng_grps)
{
char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
struct otx_cpt_eng_grp_info *mirrored_grp;
char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
struct otx_cpt_eng_grp_info *grp;
struct otx_cpt_engs_rsvd *engs;
u32 mask[4];
int i, j;
pr_debug("Engine groups global info\n");
pr_debug("max SE %d, max AE %d\n",
eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
"enabled" : "disabled");
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
pr_debug("Ucode0 filename %s, version %s\n",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
grp->mirror.is_ena ?
mirrored_grp->ucode[0].ver_str :
grp->ucode[0].ver_str);
}
for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
engs = &grp->engs[j];
if (engs->type) {
print_engs_info(grp, engs_info,
2*OTX_CPT_UCODE_NAME_LENGTH, j);
pr_debug("Slot%d: %s\n", j, engs_info);
bitmap_to_arr32(mask, engs->bmap,
eng_grps->engs_num);
pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
mask[3], mask[2], mask[1], mask[0]);
} else
pr_debug("Slot%d not used\n", j);
}
if (grp->is_enabled) {
cpt_print_engines_mask(grp, dev, engs_mask,
OTX_CPT_UCODE_NAME_LENGTH);
pr_debug("Cmask: %s\n", engs_mask);
}
}
}
static int update_engines_avail_count(struct device *dev,
struct otx_cpt_engs_available *avail,
struct otx_cpt_engs_rsvd *engs, int val)
{
switch (engs->type) {
case OTX_CPT_SE_TYPES:
avail->se_cnt += val;
break;
case OTX_CPT_AE_TYPES:
avail->ae_cnt += val;
break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
return 0;
}
static int update_engines_offset(struct device *dev,
struct otx_cpt_engs_available *avail,
struct otx_cpt_engs_rsvd *engs)
{
switch (engs->type) {
case OTX_CPT_SE_TYPES:
engs->offset = 0;
break;
case OTX_CPT_AE_TYPES:
engs->offset = avail->max_se_cnt;
break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
return 0;
}
static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
{
int i, ret = 0;
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!grp->engs[i].type)
continue;
if (grp->engs[i].count > 0) {
ret = update_engines_avail_count(dev, &grp->g->avail,
&grp->engs[i],
grp->engs[i].count);
if (ret)
return ret;
}
grp->engs[i].type = 0;
grp->engs[i].count = 0;
grp->engs[i].offset = 0;
grp->engs[i].ucode = NULL;
bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
}
return 0;
}
static int do_reserve_engines(struct device *dev,
struct otx_cpt_eng_grp_info *grp,
struct otx_cpt_engines *req_engs)
{
struct otx_cpt_engs_rsvd *engs = NULL;
int i, ret;
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!grp->engs[i].type) {
engs = &grp->engs[i];
break;
}
}
if (!engs)
return -ENOMEM;
engs->type = req_engs->type;
engs->count = req_engs->count;
ret = update_engines_offset(dev, &grp->g->avail, engs);
if (ret)
return ret;
if (engs->count > 0) {
ret = update_engines_avail_count(dev, &grp->g->avail, engs,
-engs->count);
if (ret)
return ret;
}
return 0;
}
static int check_engines_availability(struct device *dev,
struct otx_cpt_eng_grp_info *grp,
struct otx_cpt_engines *req_eng)
{
int avail_cnt = 0;
switch (req_eng->type) {
case OTX_CPT_SE_TYPES:
avail_cnt = grp->g->avail.se_cnt;
break;
case OTX_CPT_AE_TYPES:
avail_cnt = grp->g->avail.ae_cnt;
break;
default:
dev_err(dev, "Invalid engine type %d\n", req_eng->type);
return -EINVAL;
}
if (avail_cnt < req_eng->count) {
dev_err(dev,
"Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count);
return -EBUSY;
}
return 0;
}
static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
struct otx_cpt_engines *req_engs, int req_cnt)
{
int i, ret;
	/* Validate that the requested number of engines is available */
for (i = 0; i < req_cnt; i++) {
ret = check_engines_availability(dev, grp, &req_engs[i]);
if (ret)
return ret;
}
/* Reserve requested engines for this engine group */
for (i = 0; i < req_cnt; i++) {
ret = do_reserve_engines(dev, grp, &req_engs[i]);
if (ret)
return ret;
}
return 0;
}
static ssize_t eng_grp_info_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
struct otx_cpt_eng_grp_info *eng_grp;
int ret;
eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
mutex_lock(&eng_grp->g->lock);
print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
cpt_print_engines_mask(eng_grp, dev, engs_mask,
OTX_CPT_UCODE_NAME_LENGTH);
ret = scnprintf(buf, PAGE_SIZE,
"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
ucode_info, engs_info, engs_mask);
mutex_unlock(&eng_grp->g->lock);
return ret;
}
static int create_sysfs_eng_grps_info(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp)
{
eng_grp->info_attr.show = eng_grp_info_show;
eng_grp->info_attr.store = NULL;
eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
eng_grp->info_attr.attr.mode = 0440;
sysfs_attr_init(&eng_grp->info_attr.attr);
return device_create_file(dev, &eng_grp->info_attr);
}
static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
if (ucode->va) {
dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
ucode->va, ucode->dma);
ucode->va = NULL;
ucode->align_va = NULL;
ucode->dma = 0;
ucode->align_dma = 0;
ucode->size = 0;
}
memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
set_ucode_filename(ucode, "");
ucode->type = 0;
}
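/*
 * Copy the microcode image (without its header) into DMA-coherent memory
 * aligned to OTX_CPT_UCODE_ALIGNMENT and byte-swap it (64-bit words, then
 * 16-bit words) into the layout expected by the engines.
 */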
static int copy_ucode_to_dma_mem(struct device *dev,
struct otx_cpt_ucode *ucode,
const u8 *ucode_data)
{
u32 i;
/* Allocate DMAable space */
ucode->va = dma_alloc_coherent(dev, ucode->size +
OTX_CPT_UCODE_ALIGNMENT,
&ucode->dma, GFP_KERNEL);
if (!ucode->va) {
dev_err(dev, "Unable to allocate space for microcode\n");
return -ENOMEM;
}
ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);
memcpy((void *) ucode->align_va, (void *) ucode_data +
sizeof(struct otx_cpt_ucode_hdr), ucode->size);
/* Byte swap 64-bit */
for (i = 0; i < (ucode->size / 8); i++)
((__be64 *)ucode->align_va)[i] =
cpu_to_be64(((u64 *)ucode->align_va)[i]);
/* Ucode needs 16-bit swap */
for (i = 0; i < (ucode->size / 2); i++)
((__be16 *)ucode->align_va)[i] =
cpu_to_be16(((u16 *)ucode->align_va)[i]);
return 0;
}
static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
const char *ucode_filename)
{
struct otx_cpt_ucode_hdr *ucode_hdr;
const struct firmware *fw;
unsigned int code_length;
int ret;
set_ucode_filename(ucode, ucode_filename);
ret = request_firmware(&fw, ucode->filename, dev);
if (ret)
return ret;
ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
ucode->ver_num = ucode_hdr->ver_num;
code_length = ntohl(ucode_hdr->code_length);
if (code_length >= INT_MAX / 2) {
dev_err(dev, "Ucode invalid code_length %u\n", code_length);
ret = -EINVAL;
goto release_fw;
}
ucode->size = code_length * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
ret = -EINVAL;
goto release_fw;
}
ret = get_ucode_type(ucode_hdr, &ucode->type);
if (ret) {
dev_err(dev, "Microcode %s unknown type 0x%x\n",
ucode->filename, ucode->type);
goto release_fw;
}
ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
if (ret)
goto release_fw;
print_ucode_dbg_info(ucode);
release_fw:
release_firmware(fw);
return ret;
}
static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
void *obj)
{
int ret;
ret = cpt_set_ucode_base(eng_grp, obj);
if (ret)
return ret;
ret = cpt_attach_and_enable_cores(eng_grp, obj);
return ret;
}
static int disable_eng_grp(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp,
void *obj)
{
int i, ret;
ret = cpt_detach_and_disable_cores(eng_grp, obj);
if (ret)
return ret;
/* Unload ucode used by this engine group */
ucode_unload(dev, &eng_grp->ucode[0]);
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
if (!eng_grp->engs[i].type)
continue;
eng_grp->engs[i].ucode = &eng_grp->ucode[0];
}
ret = cpt_set_ucode_base(eng_grp, obj);
return ret;
}
static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
struct otx_cpt_eng_grp_info *src_grp)
{
/* Setup fields for engine group which is mirrored */
src_grp->mirror.is_ena = false;
src_grp->mirror.idx = 0;
src_grp->mirror.ref_count++;
/* Setup fields for mirroring engine group */
dst_grp->mirror.is_ena = true;
dst_grp->mirror.idx = src_grp->idx;
dst_grp->mirror.ref_count = 0;
}
static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
struct otx_cpt_eng_grp_info *src_grp;
if (!dst_grp->mirror.is_ena)
return;
src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
src_grp->mirror.ref_count--;
dst_grp->mirror.is_ena = false;
dst_grp->mirror.idx = 0;
dst_grp->mirror.ref_count = 0;
}
static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
struct otx_cpt_engines *engs, int engs_cnt)
{
struct otx_cpt_engs_rsvd *mirrored_engs;
int i;
for (i = 0; i < engs_cnt; i++) {
mirrored_engs = find_engines_by_type(mirrored_eng_grp,
engs[i].type);
if (!mirrored_engs)
continue;
/*
* If mirrored group has this type of engines attached then
* there are 3 scenarios possible:
* 1) mirrored_engs.count == engs[i].count then all engines
* from mirrored engine group will be shared with this engine
* group
* 2) mirrored_engs.count > engs[i].count then only a subset of
* engines from mirrored engine group will be shared with this
* engine group
* 3) mirrored_engs.count < engs[i].count then all engines
* from mirrored engine group will be shared with this group
 * and additional engines will be reserved for exclusive use
* by this engine group
*/
engs[i].count -= mirrored_engs->count;
}
}
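/*
 * Look for an already enabled engine group that has loaded microcode with
 * the same version string; such a group can be mirrored so that engines
 * and microcode are shared instead of being loaded again.
 */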
static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
struct otx_cpt_eng_grp_info *grp)
{
struct otx_cpt_eng_grps *eng_grps = grp->g;
int i;
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (!eng_grps->grp[i].is_enabled)
continue;
if (eng_grps->grp[i].ucode[0].type)
continue;
if (grp->idx == i)
continue;
if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
grp->ucode[0].ver_str,
OTX_CPT_UCODE_VER_STR_SZ))
return &eng_grps->grp[i];
}
return NULL;
}
static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
struct otx_cpt_eng_grps *eng_grps)
{
int i;
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (!eng_grps->grp[i].is_enabled)
return &eng_grps->grp[i];
}
return NULL;
}
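/*
 * Build the per-engine-type core bitmaps for this group: pick unused cores
 * from the range reserved for each engine type and, if the group mirrors
 * another group, also merge in (a subset of) the mirrored group's cores.
 */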
static int eng_grp_update_masks(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp)
{
struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
struct otx_cpt_bitmap tmp_bmap = { {0} };
int i, j, cnt, max_cnt;
int bit;
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
if (engs->count <= 0)
continue;
switch (engs->type) {
case OTX_CPT_SE_TYPES:
max_cnt = eng_grp->g->avail.max_se_cnt;
break;
case OTX_CPT_AE_TYPES:
max_cnt = eng_grp->g->avail.max_ae_cnt;
break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
cnt = engs->count;
WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
for (j = engs->offset; j < engs->offset + max_cnt; j++) {
if (!eng_grp->g->eng_ref_cnt[j]) {
bitmap_set(tmp_bmap.bits, j, 1);
cnt--;
if (!cnt)
break;
}
}
if (cnt)
return -ENOSPC;
bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
}
if (!eng_grp->mirror.is_ena)
return 0;
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i];
if (!engs->type)
continue;
mirrored_engs = find_engines_by_type(
&eng_grp->g->grp[eng_grp->mirror.idx],
engs->type);
WARN_ON(!mirrored_engs && engs->count <= 0);
if (!mirrored_engs)
continue;
bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
eng_grp->g->engs_num);
if (engs->count < 0) {
bit = find_first_bit(mirrored_engs->bmap,
eng_grp->g->engs_num);
bitmap_clear(tmp_bmap.bits, bit, -engs->count);
}
bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
eng_grp->g->engs_num);
}
return 0;
}
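/*
 * Disable an engine group, release its engines and remove its sysfs entry.
 * Deletion is refused while other groups still mirror this group.
 */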
static int delete_engine_group(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp)
{
int i, ret;
if (!eng_grp->is_enabled)
return -EINVAL;
if (eng_grp->mirror.ref_count) {
dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
eng_grp->idx);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (eng_grp->g->grp[i].mirror.is_ena &&
eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
pr_cont(" %d", i);
}
pr_cont("\n");
return -EINVAL;
}
/* Removing engine group mirroring if enabled */
remove_eng_grp_mirroring(eng_grp);
/* Disable engine group */
ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
if (ret)
return ret;
/* Release all engines held by this engine group */
ret = release_engines(dev, eng_grp);
if (ret)
return ret;
device_remove_file(dev, &eng_grp->info_attr);
eng_grp->is_enabled = false;
return 0;
}
static int validate_1_ucode_scenario(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp,
struct otx_cpt_engines *engs, int engs_cnt)
{
int i;
/* Verify that ucode loaded supports requested engine types */
for (i = 0; i < engs_cnt; i++) {
if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
engs[i].type)) {
dev_err(dev,
"Microcode %s does not support %s engines\n",
eng_grp->ucode[0].filename,
get_eng_type_str(engs[i].type));
return -EINVAL;
}
}
return 0;
}
static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
{
struct otx_cpt_ucode *ucode;
if (eng_grp->mirror.is_ena)
ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
else
ucode = &eng_grp->ucode[0];
WARN_ON(!eng_grp->engs[0].type);
eng_grp->engs[0].ucode = ucode;
}
static int create_engine_group(struct device *dev,
struct otx_cpt_eng_grps *eng_grps,
struct otx_cpt_engines *engs, int engs_cnt,
void *ucode_data[], int ucodes_cnt,
bool use_uc_from_tar_arch)
{
struct otx_cpt_eng_grp_info *mirrored_eng_grp;
struct tar_ucode_info_t *tar_info;
struct otx_cpt_eng_grp_info *eng_grp;
int i, ret = 0;
if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
return -EINVAL;
/* Validate if requested engine types are supported by this device */
for (i = 0; i < engs_cnt; i++)
if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
dev_err(dev, "Device does not support %s engines\n",
get_eng_type_str(engs[i].type));
return -EPERM;
}
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps);
if (!eng_grp) {
dev_err(dev, "Error all engine groups are being used\n");
return -ENOSPC;
}
/* Load ucode */
for (i = 0; i < ucodes_cnt; i++) {
if (use_uc_from_tar_arch) {
tar_info = (struct tar_ucode_info_t *) ucode_data[i];
eng_grp->ucode[i] = tar_info->ucode;
ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
tar_info->ucode_ptr);
} else
ret = ucode_load(dev, &eng_grp->ucode[i],
(char *) ucode_data[i]);
if (ret)
goto err_ucode_unload;
}
/* Validate scenario where 1 ucode is used */
ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
if (ret)
goto err_ucode_unload;
/* Check if this group mirrors another existing engine group */
mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
if (mirrored_eng_grp) {
/* Setup mirroring */
setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
/*
* Update count of requested engines because some
* of them might be shared with mirrored group
*/
update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
}
/* Reserve engines */
ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
if (ret)
goto err_ucode_unload;
/* Update ucode pointers used by engines */
update_ucode_ptrs(eng_grp);
/* Update engine masks used by this group */
ret = eng_grp_update_masks(dev, eng_grp);
if (ret)
goto err_release_engs;
/* Create sysfs entry for engine group info */
ret = create_sysfs_eng_grps_info(dev, eng_grp);
if (ret)
goto err_release_engs;
/* Enable engine group */
ret = enable_eng_grp(eng_grp, eng_grps->obj);
if (ret)
goto err_release_engs;
/*
* If this engine group mirrors another engine group
* then we need to unload ucode as we will use ucode
* from mirrored engine group
*/
if (eng_grp->mirror.is_ena)
ucode_unload(dev, &eng_grp->ucode[0]);
eng_grp->is_enabled = true;
if (eng_grp->mirror.is_ena)
dev_info(dev,
"Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx);
else
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
return 0;
err_release_engs:
release_engines(dev, eng_grp);
err_ucode_unload:
ucode_unload(dev, &eng_grp->ucode[0]);
return ret;
}
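/*
 * sysfs "ucode_load" store handler. The written string either deletes an
 * existing group ("engine_groupX:null") or creates a new one from engine
 * count specifiers ("se:<n>", "ae:<n>") plus one or two microcode file
 * names, with the individual fields separated by ';'.
 */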
static ssize_t ucode_load_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
char *start, *val, *err_msg, *tmp;
struct otx_cpt_eng_grps *eng_grps;
int grp_idx = 0, ret = -EINVAL;
bool has_se, has_ie, has_ae;
int del_grp_idx = -1;
int ucode_idx = 0;
if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
return -EINVAL;
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
err_msg = "Invalid engine group format";
strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
start = tmp_buf;
has_se = has_ie = has_ae = false;
for (;;) {
val = strsep(&start, ";");
if (!val)
break;
val = strim(val);
if (!*val)
continue;
if (!strncasecmp(val, "engine_group", 12)) {
if (del_grp_idx != -1)
goto err_print;
tmp = strim(strsep(&val, ":"));
if (!val)
goto err_print;
if (strlen(tmp) != 13)
goto err_print;
if (kstrtoint((tmp + 12), 10, &del_grp_idx))
goto err_print;
val = strim(val);
if (strncasecmp(val, "null", 4))
goto err_print;
if (strlen(val) != 4)
goto err_print;
} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
if (has_se || ucode_idx)
goto err_print;
tmp = strim(strsep(&val, ":"));
if (!val)
goto err_print;
if (strlen(tmp) != 2)
goto err_print;
if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
goto err_print;
engs[grp_idx++].type = OTX_CPT_SE_TYPES;
has_se = true;
} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
if (has_ae || ucode_idx)
goto err_print;
tmp = strim(strsep(&val, ":"));
if (!val)
goto err_print;
if (strlen(tmp) != 2)
goto err_print;
if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
goto err_print;
engs[grp_idx++].type = OTX_CPT_AE_TYPES;
has_ae = true;
} else {
if (ucode_idx > 1)
goto err_print;
if (!strlen(val))
goto err_print;
if (strnstr(val, " ", strlen(val)))
goto err_print;
ucode_filename[ucode_idx++] = val;
}
}
/* Validate input parameters */
if (del_grp_idx == -1) {
if (!(grp_idx && ucode_idx))
goto err_print;
if (ucode_idx > 1 && grp_idx < 2)
goto err_print;
if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
err_msg = "Error max 2 engine types can be attached";
goto err_print;
}
} else {
if (del_grp_idx < 0 ||
del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Invalid engine group index %d\n",
del_grp_idx);
ret = -EINVAL;
return ret;
}
if (!eng_grps->grp[del_grp_idx].is_enabled) {
dev_err(dev, "Error engine_group%d is not configured\n",
del_grp_idx);
ret = -EINVAL;
return ret;
}
if (grp_idx || ucode_idx)
goto err_print;
}
mutex_lock(&eng_grps->lock);
if (eng_grps->is_rdonly) {
dev_err(dev, "Disable VFs before modifying engine groups\n");
ret = -EACCES;
goto err_unlock;
}
if (del_grp_idx == -1)
/* create engine group */
ret = create_engine_group(dev, eng_grps, engs, grp_idx,
(void **) ucode_filename,
ucode_idx, false);
else
/* delete engine group */
ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
if (ret)
goto err_unlock;
print_dbg_info(dev, eng_grps);
err_unlock:
mutex_unlock(&eng_grps->lock);
return ret ? ret : count;
err_print:
dev_err(dev, "%s\n", err_msg);
return ret;
}
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps,
int pf_type)
{
struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
struct tar_arch_info_t *tar_arch = NULL;
char *tar_filename;
int i, ret = 0;
mutex_lock(&eng_grps->lock);
/*
 * We don't create an engine group for kernel crypto if an attempt to
 * create it was already made (when the user enabled VFs for the first time).
*/
if (eng_grps->is_first_try)
goto unlock_mutex;
eng_grps->is_first_try = true;
/* We create group for kcrypto only if no groups are configured */
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
if (eng_grps->grp[i].is_enabled)
goto unlock_mutex;
switch (pf_type) {
case OTX_CPT_AE:
case OTX_CPT_SE:
tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
break;
default:
dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
ret = -EINVAL;
goto unlock_mutex;
}
tar_arch = load_tar_archive(&pdev->dev, tar_filename);
if (!tar_arch)
goto unlock_mutex;
/*
* If device supports SE engines and there is SE microcode in tar
* archive try to create engine group with SE engines for kernel
* crypto functionality (symmetric crypto)
*/
tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
if (tar_info[0] &&
dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {
engs[0].type = OTX_CPT_SE_TYPES;
engs[0].count = eng_grps->avail.max_se_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) tar_info, 1, true);
if (ret)
goto release_tar_arch;
}
/*
* If device supports AE engines and there is AE microcode in tar
* archive try to create engine group with AE engines for asymmetric
* crypto functionality.
*/
tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
if (tar_info[0] &&
dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {
engs[0].type = OTX_CPT_AE_TYPES;
engs[0].count = eng_grps->avail.max_ae_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) tar_info, 1, true);
if (ret)
goto release_tar_arch;
}
print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
release_tar_archive(tar_arch);
unlock_mutex:
mutex_unlock(&eng_grps->lock);
return ret;
}
void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
bool is_rdonly)
{
mutex_lock(&eng_grps->lock);
eng_grps->is_rdonly = is_rdonly;
mutex_unlock(&eng_grps->lock);
}
void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
{
int grp, timeout = 100;
u64 reg;
/* Disengage the cores from groups */
for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
udelay(CSR_DELAY);
}
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
while (reg) {
udelay(CSR_DELAY);
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
if (timeout--) {
dev_warn(&cpt->pdev->dev, "Cores still busy\n");
break;
}
}
/* Disable the cores */
writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
}
void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps)
{
struct otx_cpt_eng_grp_info *grp;
int i, j;
mutex_lock(&eng_grps->lock);
if (eng_grps->is_ucode_load_created) {
device_remove_file(&pdev->dev,
&eng_grps->ucode_load_attr);
eng_grps->is_ucode_load_created = false;
}
/* First delete all mirroring engine groups */
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
if (eng_grps->grp[i].mirror.is_ena)
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
/* Delete remaining engine groups */
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
/* Release memory */
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
kfree(grp->engs[j].bmap);
grp->engs[j].bmap = NULL;
}
}
mutex_unlock(&eng_grps->lock);
}
int otx_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
struct otx_cpt_eng_grp_info *grp;
int i, j, ret = 0;
mutex_init(&eng_grps->lock);
eng_grps->obj = pci_get_drvdata(pdev);
eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
eng_grps->engs_num = eng_grps->avail.max_se_cnt +
eng_grps->avail.max_ae_cnt;
if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(&pdev->dev,
"Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
ret = -EINVAL;
goto err;
}
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
grp->g = eng_grps;
grp->idx = i;
snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
"engine_group%d", i);
for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
grp->engs[j].bmap =
kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
sizeof(long), GFP_KERNEL);
if (!grp->engs[j].bmap) {
ret = -ENOMEM;
goto err;
}
}
}
switch (pf_type) {
case OTX_CPT_SE:
/* OcteonTX 83XX SE CPT PF has only SE engines attached */
eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
break;
case OTX_CPT_AE:
/* OcteonTX 83XX AE CPT PF has only AE engines attached */
eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
break;
default:
dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
ret = -EINVAL;
goto err;
}
eng_grps->ucode_load_attr.show = NULL;
eng_grps->ucode_load_attr.store = ucode_load_store;
eng_grps->ucode_load_attr.attr.name = "ucode_load";
eng_grps->ucode_load_attr.attr.mode = 0220;
sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
ret = device_create_file(&pdev->dev,
&eng_grps->ucode_load_attr);
if (ret)
goto err;
eng_grps->is_ucode_load_created = true;
print_dbg_info(&pdev->dev, eng_grps);
return ret;
err:
otx_cpt_cleanup_eng_grps(pdev, eng_grps);
return ret;
}
| linux-master | drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include "otx_cptvf.h"
#define CPT_MBOX_MSG_TIMEOUT 2000
static char *get_mbox_opcode_str(int msg_opcode)
{
char *str = "Unknown";
switch (msg_opcode) {
case OTX_CPT_MSG_VF_UP:
str = "UP";
break;
case OTX_CPT_MSG_VF_DOWN:
str = "DOWN";
break;
case OTX_CPT_MSG_READY:
str = "READY";
break;
case OTX_CPT_MSG_QLEN:
str = "QLEN";
break;
case OTX_CPT_MSG_QBIND_GRP:
str = "QBIND_GRP";
break;
case OTX_CPT_MSG_VQ_PRIORITY:
str = "VQ_PRIORITY";
break;
case OTX_CPT_MSG_PF_TYPE:
str = "PF_TYPE";
break;
case OTX_CPT_MSG_ACK:
str = "ACK";
break;
case OTX_CPT_MSG_NACK:
str = "NACK";
break;
}
return str;
}
static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
{
char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];
hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
if (vf_id >= 0)
pr_debug("MBOX msg %s received from VF%d raw_data %s",
get_mbox_opcode_str(mbox_msg->msg), vf_id,
raw_data_str);
else
pr_debug("MBOX msg %s received from PF raw_data %s",
get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
static void cptvf_send_msg_to_pf(struct otx_cptvf *cptvf,
struct otx_cpt_mbox *mbx)
{
/* Writing mbox(1) causes interrupt */
writeq(mbx->msg, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
writeq(mbx->data, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
}
/* Interrupt handler to handle mailbox messages from VFs */
void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
/*
* MBOX[0] contains msg
* MBOX[1] contains data
*/
mbx.msg = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
mbx.data = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
dump_mbox_msg(&mbx, -1);
switch (mbx.msg) {
case OTX_CPT_MSG_VF_UP:
cptvf->pf_acked = true;
cptvf->num_vfs = mbx.data;
break;
case OTX_CPT_MSG_READY:
cptvf->pf_acked = true;
cptvf->vfid = mbx.data;
dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid);
break;
case OTX_CPT_MSG_QBIND_GRP:
cptvf->pf_acked = true;
cptvf->vftype = mbx.data;
dev_dbg(&cptvf->pdev->dev, "VF %d type %s group %d\n",
cptvf->vfid,
((mbx.data == OTX_CPT_SE_TYPES) ? "SE" : "AE"),
cptvf->vfgrp);
break;
case OTX_CPT_MSG_ACK:
cptvf->pf_acked = true;
break;
case OTX_CPT_MSG_NACK:
cptvf->pf_nacked = true;
break;
default:
dev_err(&cptvf->pdev->dev, "Invalid msg from PF, msg 0x%llx\n",
mbx.msg);
break;
}
}
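/*
 * Send a mailbox message to the PF and poll every 10 ms for an ACK or
 * NACK, giving up after CPT_MBOX_MSG_TIMEOUT milliseconds. Returns -EINVAL
 * on a NACK and -EBUSY on timeout.
 */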
static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
struct otx_cpt_mbox *mbx)
{
int timeout = CPT_MBOX_MSG_TIMEOUT;
int sleep = 10;
cptvf->pf_acked = false;
cptvf->pf_nacked = false;
cptvf_send_msg_to_pf(cptvf, mbx);
/* Wait for previous message to be acked, timeout 2sec */
while (!cptvf->pf_acked) {
if (cptvf->pf_nacked)
return -EINVAL;
msleep(sleep);
if (cptvf->pf_acked)
break;
timeout -= sleep;
if (!timeout) {
dev_err(&cptvf->pdev->dev,
"PF didn't ack to mbox msg %llx from VF%u\n",
mbx->msg, cptvf->vfid);
return -EBUSY;
}
}
return 0;
}
/*
 * Checks if the VF is able to communicate with the PF
 * and also gets the CPT number this VF is associated with.
*/
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
mbx.msg = OTX_CPT_MSG_READY;
return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
* Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF.
* Must be ACKed.
*/
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
mbx.msg = OTX_CPT_MSG_QLEN;
mbx.data = cptvf->qsize;
return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
 * Communicate the VF group required to the PF and get the VQ bound to that group
*/
int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
{
struct otx_cpt_mbox mbx = {};
int ret;
mbx.msg = OTX_CPT_MSG_QBIND_GRP;
/* Convey group of the VF */
mbx.data = group;
ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
if (ret)
return ret;
cptvf->vfgrp = group;
return 0;
}
/*
 * Communicate this VF's queue priority to the PF
*/
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
	/* Convey priority of the VF */
mbx.data = cptvf->priority;
return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
* Communicate to PF that VF is UP and running
*/
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
mbx.msg = OTX_CPT_MSG_VF_UP;
return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
 * Communicate to PF that VF is DOWN
*/
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
mbx.msg = OTX_CPT_MSG_VF_DOWN;
return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
| linux-master | drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"
#define DRV_NAME "octeontx-cptvf"
#define DRV_VERSION "1.0"
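/* Tasklet handler: post-process completed instructions for this virtual queue */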
static void vq_work_handler(unsigned long data)
{
struct otx_cptvf_wqe_info *cwqe_info =
(struct otx_cptvf_wqe_info *) data;
otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}
static int init_worker_threads(struct otx_cptvf *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
struct otx_cptvf_wqe_info *cwqe_info;
int i;
cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
if (!cwqe_info)
return -ENOMEM;
if (cptvf->num_queues) {
dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
cptvf->num_queues);
}
for (i = 0; i < cptvf->num_queues; i++) {
tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
(u64)cwqe_info);
cwqe_info->vq_wqe[i].cptvf = cptvf;
}
cptvf->wqe_info = cwqe_info;
return 0;
}
static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
struct otx_cptvf_wqe_info *cwqe_info;
int i;
cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
if (!cwqe_info)
return;
if (cptvf->num_queues) {
dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
cptvf->num_queues);
}
for (i = 0; i < cptvf->num_queues; i++)
tasklet_kill(&cwqe_info->vq_wqe[i].twork);
kfree_sensitive(cwqe_info);
cptvf->wqe_info = NULL;
}
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
struct otx_cpt_pending_queue *queue;
int i;
for_each_pending_queue(pqinfo, queue, i) {
if (!queue->head)
continue;
/* free single queue */
kfree_sensitive((queue->head));
queue->front = 0;
queue->rear = 0;
queue->qlen = 0;
}
pqinfo->num_queues = 0;
}
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
u32 num_queues)
{
struct otx_cpt_pending_queue *queue = NULL;
int ret;
u32 i;
pqinfo->num_queues = num_queues;
for_each_pending_queue(pqinfo, queue, i) {
queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
if (!queue->head) {
ret = -ENOMEM;
goto pending_qfail;
}
queue->pending_count = 0;
queue->front = 0;
queue->rear = 0;
queue->qlen = qlen;
/* init queue spin lock */
spin_lock_init(&queue->lock);
}
return 0;
pending_qfail:
free_pending_queues(pqinfo);
return ret;
}
static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
u32 num_queues)
{
struct pci_dev *pdev = cptvf->pdev;
int ret;
if (!num_queues)
return 0;
ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
if (ret) {
dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
num_queues);
return ret;
}
return 0;
}
static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
if (!cptvf->num_queues)
return;
dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
cptvf->num_queues);
free_pending_queues(&cptvf->pqinfo);
}
static void free_command_queues(struct otx_cptvf *cptvf,
struct otx_cpt_cmd_qinfo *cqinfo)
{
struct otx_cpt_cmd_queue *queue = NULL;
struct otx_cpt_cmd_chunk *chunk = NULL;
struct pci_dev *pdev = cptvf->pdev;
int i;
/* clean up for each queue */
for (i = 0; i < cptvf->num_queues; i++) {
queue = &cqinfo->queue[i];
while (!list_empty(&cqinfo->queue[i].chead)) {
chunk = list_first_entry(&cqinfo->queue[i].chead,
struct otx_cpt_cmd_chunk, nextchunk);
dma_free_coherent(&pdev->dev, chunk->size,
chunk->head,
chunk->dma_addr);
chunk->head = NULL;
chunk->dma_addr = 0;
list_del(&chunk->nextchunk);
kfree_sensitive(chunk);
}
queue->num_chunks = 0;
queue->idx = 0;
}
}
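/*
 * Allocate each command queue as a list of DMA-coherent chunks. The
 * trailing next-chunk pointer of every chunk holds the DMA address of the
 * following chunk, and the last chunk points back to the first so the
 * queue is circular.
 */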
static int alloc_command_queues(struct otx_cptvf *cptvf,
struct otx_cpt_cmd_qinfo *cqinfo,
u32 qlen)
{
struct otx_cpt_cmd_chunk *curr, *first, *last;
struct otx_cpt_cmd_queue *queue = NULL;
struct pci_dev *pdev = cptvf->pdev;
size_t q_size, c_size, rem_q_size;
u32 qcsize_bytes;
int i;
/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
cptvf->qsize = min(qlen, cqinfo->qchunksize) *
OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
/* Qsize in bytes to create space for alignment */
q_size = qlen * OTX_CPT_INST_SIZE;
qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;
/* per queue initialization */
for (i = 0; i < cptvf->num_queues; i++) {
rem_q_size = q_size;
first = NULL;
last = NULL;
queue = &cqinfo->queue[i];
INIT_LIST_HEAD(&queue->chead);
do {
curr = kzalloc(sizeof(*curr), GFP_KERNEL);
if (!curr)
goto cmd_qfail;
c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
rem_q_size;
curr->head = dma_alloc_coherent(&pdev->dev,
c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
&curr->dma_addr, GFP_KERNEL);
if (!curr->head) {
dev_err(&pdev->dev,
"Command Q (%d) chunk (%d) allocation failed\n",
i, queue->num_chunks);
goto free_curr;
}
curr->size = c_size;
if (queue->num_chunks == 0) {
first = curr;
queue->base = first;
}
list_add_tail(&curr->nextchunk,
&cqinfo->queue[i].chead);
queue->num_chunks++;
rem_q_size -= c_size;
if (last)
*((u64 *)(&last->head[last->size])) =
(u64)curr->dma_addr;
last = curr;
} while (rem_q_size);
/*
* Make the queue circular, tie back last chunk entry to head
*/
curr = first;
*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
queue->qhead = curr;
}
return 0;
free_curr:
kfree(curr);
cmd_qfail:
free_command_queues(cptvf, cqinfo);
return -ENOMEM;
}
static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
struct pci_dev *pdev = cptvf->pdev;
int ret;
/* setup command queues */
ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
if (ret) {
dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
cptvf->num_queues);
return ret;
}
return ret;
}
static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
if (!cptvf->num_queues)
return;
dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
cptvf->num_queues);
free_command_queues(cptvf, &cptvf->cqinfo);
}
static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
cleanup_worker_threads(cptvf);
cleanup_pending_queues(cptvf);
cleanup_command_queues(cptvf);
}
static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
struct pci_dev *pdev = cptvf->pdev;
u32 max_dev_queues = 0;
int ret;
max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
	/* Limit the number of queues to what the device supports per VF */
num_queues = min_t(u32, num_queues, max_dev_queues);
cptvf->num_queues = num_queues;
ret = init_command_queues(cptvf, qlen);
if (ret) {
dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
num_queues);
return ret;
}
ret = init_pending_queues(cptvf, qlen, num_queues);
if (ret) {
dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
num_queues);
goto setup_pqfail;
}
/* Create worker threads for BH processing */
ret = init_worker_threads(cptvf);
if (ret) {
dev_err(&pdev->dev, "Failed to setup worker threads\n");
goto init_work_fail;
}
return 0;
init_work_fail:
cleanup_worker_threads(cptvf);
cleanup_pending_queues(cptvf);
setup_pqfail:
cleanup_command_queues(cptvf);
return ret;
}
static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
free_cpumask_var(cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
union otx_cptx_vqx_ctl vqx_ctl;
vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
vqx_ctl.s.ena = val;
writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}
void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
union otx_cptx_vqx_doorbell vqx_dbell;
vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}
static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
union otx_cptx_vqx_inprog vqx_inprg;
vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
vqx_inprg.s.inflight = val;
writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}
static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
union otx_cptx_vqx_done_wait vqx_dwait;
vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
vqx_dwait.s.num_wait = val;
writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}
static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_done_wait vqx_dwait;
vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
return vqx_dwait.s.num_wait;
}
static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
union otx_cptx_vqx_done_wait vqx_dwait;
vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
vqx_dwait.s.time_wait = time;
writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}
static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_done_wait vqx_dwait;
vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
return vqx_dwait.s.time_wait;
}
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;
vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
/* Enable SWERR interrupts for the requested VF */
vqx_misc_ena.s.swerr = 1;
writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}
static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;
vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
/* Enable MBOX interrupt for the requested VF */
vqx_misc_ena.s.mbox = 1;
writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}
static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_done_ena_w1s vqx_done_ena;
vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
/* Enable DONE interrupt for the requested VF */
vqx_done_ena.s.done = 1;
writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}
static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_int vqx_misc_int;
vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
/* W1C for the VF */
vqx_misc_int.s.dovf = 1;
writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_int vqx_misc_int;
vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
/* W1C for the VF */
vqx_misc_int.s.irde = 1;
writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_int vqx_misc_int;
vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
/* W1C for the VF */
vqx_misc_int.s.nwrp = 1;
writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_int vqx_misc_int;
vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
/* W1C for the VF */
vqx_misc_int.s.mbox = 1;
writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_misc_int vqx_misc_int;
vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
/* W1C for the VF */
vqx_misc_int.s.swerr = 1;
writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
void *arg)
{
struct otx_cptvf *cptvf = arg;
struct pci_dev *pdev = cptvf->pdev;
u64 intr;
intr = cptvf_read_vf_misc_intr_status(cptvf);
/* Check for MISC interrupt types */
if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
intr, cptvf->vfid);
otx_cptvf_handle_mbox_intr(cptvf);
cptvf_clear_mbox_intr(cptvf);
} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
cptvf_clear_dovf_intr(cptvf);
/* Clear doorbell count */
otx_cptvf_write_vq_doorbell(cptvf, 0);
dev_err(&pdev->dev,
"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
intr, cptvf->vfid);
} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
cptvf_clear_irde_intr(cptvf);
dev_err(&pdev->dev,
"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
intr, cptvf->vfid);
} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
cptvf_clear_nwrp_intr(cptvf);
dev_err(&pdev->dev,
"NCB response write error interrupt 0x%llx on CPT VF %d\n",
intr, cptvf->vfid);
} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
cptvf_clear_swerr_intr(cptvf);
dev_err(&pdev->dev,
"Software error interrupt 0x%llx on CPT VF %d\n",
intr, cptvf->vfid);
} else {
dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
cptvf->vfid);
}
return IRQ_HANDLED;
}
static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
int qno)
{
struct otx_cptvf_wqe_info *nwqe_info;
if (unlikely(qno >= cptvf->num_queues))
return NULL;
nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
return &nwqe_info->vq_wqe[qno];
}
static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
union otx_cptx_vqx_done vqx_done;
vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
return vqx_done.s.done;
}
static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
u32 ackcnt)
{
union otx_cptx_vqx_done_ack vqx_dack_cnt;
vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
vqx_dack_cnt.s.done_ack = ackcnt;
writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}
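/*
 * DONE interrupt handler: acknowledge the number of completed instructions
 * and schedule the worker tasklet which post-processes them.
 */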
static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
void *cptvf_dev)
{
struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
struct pci_dev *pdev = cptvf->pdev;
/* Read the number of completions */
u32 intr = cptvf_read_vq_done_count(cptvf);
if (intr) {
struct otx_cptvf_wqe *wqe;
/*
* Acknowledge the number of scheduled completions for
* processing
*/
cptvf_write_vq_done_ack(cptvf, intr);
wqe = get_cptvf_vq_wqe(cptvf, 0);
if (unlikely(!wqe)) {
dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
cptvf->vfid);
return IRQ_NONE;
}
tasklet_hi_schedule(&wqe->twork);
}
return IRQ_HANDLED;
}
static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
struct pci_dev *pdev = cptvf->pdev;
int cpu;
if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
GFP_KERNEL)) {
dev_err(&pdev->dev,
"Allocation failed for affinity_mask for VF %d\n",
cptvf->vfid);
return;
}
cpu = cptvf->vfid % num_online_cpus();
cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
cptvf->affinity_mask[vec]);
irq_set_affinity_hint(pci_irq_vector(pdev, vec),
cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
union otx_cptx_vqx_saddr vqx_saddr;
vqx_saddr.u = val;
writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
u64 base_addr = 0;
/* Disable the VQ */
cptvf_write_vq_ctl(cptvf, 0);
/* Reset the doorbell */
otx_cptvf_write_vq_doorbell(cptvf, 0);
/* Clear inflight */
cptvf_write_vq_inprog(cptvf, 0);
/* Write VQ SADDR */
base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
cptvf_write_vq_saddr(cptvf, base_addr);
/* Configure timerhold / coalescence */
cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
/* Enable the VQ */
cptvf_write_vq_ctl(cptvf, 1);
/* Flag the VF ready */
cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}
static ssize_t vf_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
char *msg;
switch (cptvf->vftype) {
case OTX_CPT_AE_TYPES:
msg = "AE";
break;
case OTX_CPT_SE_TYPES:
msg = "SE";
break;
default:
msg = "Invalid";
}
return sysfs_emit(buf, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
int val, ret;
ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
if (val < 0)
return -EINVAL;
if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Engine group >= than max available groups %d\n",
OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
if (ret)
return ret;
return count;
}
static ssize_t vf_coalesc_time_wait_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_timewait(cptvf));
}
static ssize_t vf_coalesc_num_wait_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_numwait(cptvf));
}
static ssize_t vf_coalesc_time_wait_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
long val;
int ret;
ret = kstrtol(buf, 10, &val);
if (ret != 0)
return ret;
if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
val > OTX_CPT_COALESC_MAX_TIME_WAIT)
return -EINVAL;
cptvf_write_vq_done_timewait(cptvf, val);
return count;
}
static ssize_t vf_coalesc_num_wait_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
long val;
int ret;
ret = kstrtol(buf, 10, &val);
if (ret != 0)
return ret;
if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
val > OTX_CPT_COALESC_MAX_NUM_WAIT)
return -EINVAL;
cptvf_write_vq_done_numwait(cptvf, val);
return count;
}
static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);
static struct attribute *otx_cptvf_attrs[] = {
&dev_attr_vf_type.attr,
&dev_attr_vf_engine_group.attr,
&dev_attr_vf_coalesc_time_wait.attr,
&dev_attr_vf_coalesc_num_wait.attr,
NULL
};
static const struct attribute_group otx_cptvf_sysfs_group = {
.attrs = otx_cptvf_attrs,
};
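/*
 * The attribute group above is registered against the VF's PCI device at the
 * end of probe(), so these controls appear as plain sysfs files under the
 * device directory. A rough usage sketch (the BDF below is illustrative only
 * and depends on the system):
 *
 *   # cat /sys/bus/pci/devices/0000:05:00.1/vf_type
 *   SE
 *   # echo 2 > /sys/bus/pci/devices/0000:05:00.1/vf_engine_group
 *
 * The write ends up in vf_engine_group_store(), which forwards the requested
 * group to the PF over the mailbox.
 */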
static int otx_cptvf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct otx_cptvf *cptvf;
int err;
cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
if (!cptvf)
return -ENOMEM;
pci_set_drvdata(pdev, cptvf);
cptvf->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
goto clear_drvdata;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
goto disable_device;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
goto release_regions;
}
/* MAP PF's configuration registers */
cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
if (!cptvf->reg_base) {
dev_err(dev, "Cannot map config register space, aborting\n");
err = -ENOMEM;
goto release_regions;
}
cptvf->node = dev_to_node(&pdev->dev);
err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "Request for #%d msix vectors failed\n",
OTX_CPT_VF_MSIX_VECTORS);
goto unmap_region;
}
err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
cptvf_misc_intr_handler, 0, "CPT VF misc intr",
cptvf);
if (err) {
dev_err(dev, "Failed to request misc irq\n");
goto free_vectors;
}
/* Enable mailbox interrupt */
cptvf_enable_mbox_interrupts(cptvf);
cptvf_enable_swerr_interrupts(cptvf);
/* Check CPT PF status; get chip ID / device ID from PF if ready */
err = otx_cptvf_check_pf_ready(cptvf);
if (err)
goto free_misc_irq;
/* CPT VF software resources initialization */
cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
if (err) {
dev_err(dev, "cptvf_sw_init() failed\n");
goto free_misc_irq;
}
/* Convey VQ LEN to PF */
err = otx_cptvf_send_vq_size_msg(cptvf);
if (err)
goto sw_cleanup;
/* CPT VF device initialization */
cptvf_device_init(cptvf);
/* Send msg to PF to assign current Q to required group */
err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
if (err)
goto sw_cleanup;
cptvf->priority = 1;
err = otx_cptvf_send_vf_priority_msg(cptvf);
if (err)
goto sw_cleanup;
err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
cptvf_done_intr_handler, 0, "CPT VF done intr",
cptvf);
if (err) {
dev_err(dev, "Failed to request done irq\n");
goto sw_cleanup;
}
/* Enable done interrupt */
cptvf_enable_done_interrupts(cptvf);
/* Set irq affinity masks */
cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
err = otx_cptvf_send_vf_up(cptvf);
if (err)
goto free_irq_affinity;
/* Initialize algorithms and set ops */
err = otx_cpt_crypto_init(pdev, THIS_MODULE,
cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
cptvf->vftype, 1, cptvf->num_vfs);
if (err) {
dev_err(dev, "Failed to register crypto algs\n");
goto free_irq_affinity;
}
err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
if (err) {
dev_err(dev, "Creating sysfs entries failed\n");
goto crypto_exit;
}
return 0;
crypto_exit:
otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
cptvf_sw_cleanup(cptvf);
free_misc_irq:
free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
pci_free_irq_vectors(cptvf->pdev);
unmap_region:
pci_iounmap(pdev, cptvf->reg_base);
release_regions:
pci_release_regions(pdev);
disable_device:
pci_disable_device(pdev);
clear_drvdata:
pci_set_drvdata(pdev, NULL);
return err;
}
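/*
 * Teardown mirrors probe() in reverse order, but only after the PF has
 * acknowledged the DOWN mailbox message; if the PF does not respond, the
 * resources are left in place and an error is logged instead.
 */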
static void otx_cptvf_remove(struct pci_dev *pdev)
{
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!cptvf) {
dev_err(&pdev->dev, "Invalid CPT-VF device\n");
return;
}
/* Convey DOWN to PF */
if (otx_cptvf_send_vf_down(cptvf)) {
dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
} else {
sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_sw_cleanup(cptvf);
pci_free_irq_vectors(cptvf->pdev);
pci_iounmap(pdev, cptvf->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
{ 0, } /* end of table */
};
static struct pci_driver otx_cptvf_pci_driver = {
.name = DRV_NAME,
.id_table = otx_cptvf_id_table,
.probe = otx_cptvf_probe,
.remove = otx_cptvf_remove,
};
module_pci_driver(otx_cptvf_pci_driver);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);
| linux-master | drivers/crypto/marvell/octeontx/otx_cptvf_main.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"
#define CPT_MAX_VF_NUM 64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32
static DEFINE_MUTEX(mutex);
static int is_crypto_registered;
struct cpt_device_desc {
enum otx_cptpf_type pf_type;
struct pci_dev *dev;
int num_queues;
};
struct cpt_device_table {
atomic_t count;
struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};
static struct cpt_device_table se_devices = {
.count = ATOMIC_INIT(0)
};
static struct cpt_device_table ae_devices = {
.count = ATOMIC_INIT(0)
};
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count, ret = 0;
count = atomic_read(&se_devices.count);
if (count < 1)
return -ENODEV;
*cpu_num = get_cpu();
if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
/*
* On OcteonTX platform there is one CPT instruction queue bound
* to each VF. We get maximum performance if one CPT queue
* is available for each cpu otherwise CPT queues need to be
* shared between cpus.
*/
if (*cpu_num >= count)
*cpu_num %= count;
*pdev = se_devices.desc[*cpu_num].dev;
} else {
pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
ret = -EINVAL;
}
put_cpu();
return ret;
}
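/*
 * get_se_device() above picks an SE VF (and hence a CPT queue) per calling
 * CPU: once the CPU number exceeds the number of registered VFs it simply
 * wraps with cpu_num % count. A sketch of the resulting mapping, assuming
 * four SE VFs (illustrative values only):
 *
 *   CPU 0..3 -> desc[0..3]
 *   CPU 4 -> desc[0], CPU 5 -> desc[1], ...
 */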
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
struct otx_cpt_req_ctx *rctx;
struct aead_request *req;
struct crypto_aead *tfm;
req = container_of(cpt_req->areq, struct aead_request, base);
tfm = crypto_aead_reqtfm(req);
rctx = aead_request_ctx_dma(req);
if (memcmp(rctx->fctx.hmac.s.hmac_calc,
rctx->fctx.hmac.s.hmac_recv,
crypto_aead_authsize(tfm)) != 0)
return -EBADMSG;
return 0;
}
static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
struct otx_cpt_info_buffer *cpt_info = arg2;
struct crypto_async_request *areq = arg1;
struct otx_cpt_req_info *cpt_req;
struct pci_dev *pdev;
if (!cpt_info)
goto complete;
cpt_req = cpt_info->req;
if (!status) {
/*
* When selected cipher is NULL we need to manually
* verify whether calculated hmac value matches
* received hmac value
*/
if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
!cpt_req->is_enc)
status = validate_hmac_cipher_null(cpt_req);
}
pdev = cpt_info->pdev;
do_request_cleanup(pdev, cpt_info);
complete:
if (areq)
crypto_request_complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
{
struct otx_cpt_req_info *req_info;
struct skcipher_request *sreq;
struct crypto_skcipher *stfm;
struct otx_cpt_req_ctx *rctx;
struct otx_cpt_enc_ctx *ctx;
u32 start, ivsize;
sreq = container_of(areq, struct skcipher_request, base);
stfm = crypto_skcipher_reqtfm(sreq);
ctx = crypto_skcipher_ctx(stfm);
if (ctx->cipher_type == OTX_CPT_AES_CBC ||
ctx->cipher_type == OTX_CPT_DES3_CBC) {
rctx = skcipher_request_ctx_dma(sreq);
req_info = &rctx->cpt_req;
ivsize = crypto_skcipher_ivsize(stfm);
start = sreq->cryptlen - ivsize;
if (req_info->is_enc) {
scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
ivsize, 0);
} else {
if (sreq->src != sreq->dst) {
scatterwalk_map_and_copy(sreq->iv, sreq->src,
start, ivsize, 0);
} else {
memcpy(sreq->iv, req_info->iv_out, ivsize);
kfree(req_info->iv_out);
}
}
}
}
static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
struct otx_cpt_info_buffer *cpt_info = arg2;
struct crypto_async_request *areq = arg1;
struct pci_dev *pdev;
if (areq) {
if (!status)
output_iv_copyback(areq);
if (cpt_info) {
pdev = cpt_info->pdev;
do_request_cleanup(pdev, cpt_info);
}
crypto_request_complete(areq, status);
}
}
static inline void update_input_data(struct otx_cpt_req_info *req_info,
struct scatterlist *inp_sg,
u32 nbytes, u32 *argcnt)
{
req_info->req.dlen += nbytes;
while (nbytes) {
u32 len = min(nbytes, inp_sg->length);
u8 *ptr = sg_virt(inp_sg);
req_info->in[*argcnt].vptr = (void *)ptr;
req_info->in[*argcnt].size = len;
nbytes -= len;
++(*argcnt);
inp_sg = sg_next(inp_sg);
}
}
static inline void update_output_data(struct otx_cpt_req_info *req_info,
struct scatterlist *outp_sg,
u32 offset, u32 nbytes, u32 *argcnt)
{
req_info->rlen += nbytes;
while (nbytes) {
u32 len = min(nbytes, outp_sg->length - offset);
u8 *ptr = sg_virt(outp_sg);
req_info->out[*argcnt].vptr = (void *) (ptr + offset);
req_info->out[*argcnt].size = len;
nbytes -= len;
++(*argcnt);
offset = 0;
outp_sg = sg_next(outp_sg);
}
}
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
int ivsize = crypto_skcipher_ivsize(stfm);
u32 start = req->cryptlen - ivsize;
gfp_t flags;
flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
if (enc) {
req_info->req.opcode.s.minor = 2;
} else {
req_info->req.opcode.s.minor = 3;
if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
ctx->cipher_type == OTX_CPT_DES3_CBC) &&
req->src == req->dst) {
req_info->iv_out = kmalloc(ivsize, flags);
if (!req_info->iv_out)
return -ENOMEM;
scatterwalk_map_and_copy(req_info->iv_out, req->src,
start, ivsize, 0);
}
}
/* Encryption data length */
req_info->req.param1 = req->cryptlen;
/* Authentication data length */
req_info->req.param2 = 0;
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
if (ctx->cipher_type == OTX_CPT_AES_XTS)
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
else
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
/*
* Storing Packet Data Information in offset
* Control Word First 8 bytes
*/
req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
req_info->in[*argcnt].size = CONTROL_WORD_LEN;
req_info->req.dlen += CONTROL_WORD_LEN;
++(*argcnt);
req_info->in[*argcnt].vptr = (u8 *)fctx;
req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
++(*argcnt);
return 0;
}
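/*
 * After create_ctx_hdr() the gather list for a cipher request is laid out
 * as (payload entries are appended by create_input_list() below):
 *
 *   in[0]: 8-byte control word (rctx->ctrl_word)
 *   in[1]: struct otx_cpt_fc_ctx (cipher/key type, key and IV)
 *   in[2..]: plaintext/ciphertext buffers from req->src
 */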
static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
int ret;
ret = create_ctx_hdr(req, enc, &argcnt);
if (ret)
return ret;
update_input_data(req_info, req->src, req->cryptlen, &argcnt);
req_info->incnt = argcnt;
return 0;
}
static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
/*
* OUTPUT buffer processing:
* AES encryption/decryption output is received in the
* following format
*
*   |<---------- IV ---------->|<--- ENCRYPTED/DECRYPTED DATA --->|
*   |   16 bytes (AES CBC)     |   request enc/dec data length    |
*/
update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
req_info->outcnt = argcnt;
}
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
struct pci_dev *pdev;
int status, cpu_num;
/* Validate that request doesn't exceed maximum CPT supported size */
if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
return -E2BIG;
/* Clear control words */
rctx->ctrl_word.flags = 0;
rctx->fctx.enc.enc_ctrl.flags = 0;
status = create_input_list(req, enc, enc_iv_len);
if (status)
return status;
create_output_list(req, enc_iv_len);
status = get_se_device(&pdev, &cpu_num);
if (status)
return status;
req_info->callback = (void *)otx_cpt_skcipher_callback;
req_info->areq = &req->base;
req_info->req_type = OTX_CPT_ENC_DEC_REQ;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
req_info->ctrl.s.grp = 0;
/*
* We perform an asynchronous send; once the request
* completes, the driver notifies the caller through the
* registered callback functions.
*/
status = otx_cpt_do_request(pdev, req_info, cpu_num);
return status;
}
static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
return cpt_enc_dec(req, true);
}
static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
return cpt_enc_dec(req, false);
}
static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
const u8 *key2 = key + (keylen / 2);
const u8 *key1 = key;
int ret;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
ctx->key_len = keylen;
memcpy(ctx->enc_key, key1, keylen / 2);
memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
ctx->cipher_type = OTX_CPT_AES_XTS;
switch (ctx->key_len) {
case 2 * AES_KEYSIZE_128:
ctx->key_type = OTX_CPT_AES_128_BIT;
break;
case 2 * AES_KEYSIZE_256:
ctx->key_type = OTX_CPT_AES_256_BIT;
break;
default:
return -EINVAL;
}
return 0;
}
static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
u32 keylen, u8 cipher_type)
{
struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;
ctx->key_len = keylen;
ctx->cipher_type = cipher_type;
memcpy(ctx->enc_key, key, keylen);
return 0;
}
static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
u32 keylen, u8 cipher_type)
{
struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (keylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX_CPT_AES_256_BIT;
break;
default:
return -EINVAL;
}
ctx->key_len = keylen;
ctx->cipher_type = cipher_type;
memcpy(ctx->enc_key, key, keylen);
return 0;
}
static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}
static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}
static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}
static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}
static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}
static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
memset(ctx, 0, sizeof(*ctx));
/*
* Additional memory for skcipher_request is
* allocated since the cryptd daemon uses
* this memory for request_ctx information
*/
crypto_skcipher_set_reqsize_dma(
tfm, sizeof(struct otx_cpt_req_ctx) +
sizeof(struct skcipher_request));
return 0;
}
static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
switch (ctx->mac_type) {
case OTX_CPT_SHA1:
ctx->hashalg = crypto_alloc_shash("sha1", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX_CPT_SHA256:
ctx->hashalg = crypto_alloc_shash("sha256", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX_CPT_SHA384:
ctx->hashalg = crypto_alloc_shash("sha384", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX_CPT_SHA512:
ctx->hashalg = crypto_alloc_shash("sha512", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
}
}
crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
return 0;
}
static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}
static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}
static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}
static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}
static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}
static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}
static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}
static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}
static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}
static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
kfree(ctx->ipad);
kfree(ctx->opad);
if (ctx->hashalg)
crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
}
/*
* This is the Integrity Check Value validation (aka the authentication tag
* length)
*/
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
switch (ctx->mac_type) {
case OTX_CPT_SHA1:
if (authsize != SHA1_DIGEST_SIZE &&
authsize != SHA1_TRUNC_DIGEST_SIZE)
return -EINVAL;
if (authsize == SHA1_TRUNC_DIGEST_SIZE)
ctx->is_trunc_hmac = true;
break;
case OTX_CPT_SHA256:
if (authsize != SHA256_DIGEST_SIZE &&
authsize != SHA256_TRUNC_DIGEST_SIZE)
return -EINVAL;
if (authsize == SHA256_TRUNC_DIGEST_SIZE)
ctx->is_trunc_hmac = true;
break;
case OTX_CPT_SHA384:
if (authsize != SHA384_DIGEST_SIZE &&
authsize != SHA384_TRUNC_DIGEST_SIZE)
return -EINVAL;
if (authsize == SHA384_TRUNC_DIGEST_SIZE)
ctx->is_trunc_hmac = true;
break;
case OTX_CPT_SHA512:
if (authsize != SHA512_DIGEST_SIZE &&
authsize != SHA512_TRUNC_DIGEST_SIZE)
return -EINVAL;
if (authsize == SHA512_TRUNC_DIGEST_SIZE)
ctx->is_trunc_hmac = true;
break;
case OTX_CPT_MAC_NULL:
if (ctx->cipher_type == OTX_CPT_AES_GCM) {
if (authsize != AES_GCM_ICV_SIZE)
return -EINVAL;
} else
return -EINVAL;
break;
default:
return -EINVAL;
}
tfm->authsize = authsize;
return 0;
}
static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
struct otx_cpt_sdesc *sdesc;
int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc)
return NULL;
sdesc->shash.tfm = alg;
return sdesc;
}
static inline void swap_data32(void *buf, u32 len)
{
cpu_to_be32_array(buf, buf, len / 4);
}
static inline void swap_data64(void *buf, u32 len)
{
__be64 *dst = buf;
u64 *src = buf;
int i = 0;
for (i = 0 ; i < len / 8; i++, src++, dst++)
*dst = cpu_to_be64p(src);
}
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
struct sha1_state *sha1;
switch (mac_type) {
case OTX_CPT_SHA1:
sha1 = (struct sha1_state *) in_pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX_CPT_SHA256:
sha256 = (struct sha256_state *) in_pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX_CPT_SHA384:
case OTX_CPT_SHA512:
sha512 = (struct sha512_state *) in_pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
return -EINVAL;
}
return 0;
}
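/*
 * copy_pad() above converts the CPU-endian words of an exported shash state
 * into the big-endian layout used for the ipad/opad fields of the FC
 * context. SHA-384 is handled through the SHA-512 case because both share
 * the same 64-byte internal state.
 */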
static int aead_hmac_init(struct crypto_aead *cipher)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
int authkeylen = ctx->auth_key_len;
u8 *ipad = NULL, *opad = NULL;
int ret = 0, icount = 0;
ctx->sdesc = alloc_sdesc(ctx->hashalg);
if (!ctx->sdesc)
return -ENOMEM;
ctx->ipad = kzalloc(bs, GFP_KERNEL);
if (!ctx->ipad) {
ret = -ENOMEM;
goto calc_fail;
}
ctx->opad = kzalloc(bs, GFP_KERNEL);
if (!ctx->opad) {
ret = -ENOMEM;
goto calc_fail;
}
ipad = kzalloc(state_size, GFP_KERNEL);
if (!ipad) {
ret = -ENOMEM;
goto calc_fail;
}
opad = kzalloc(state_size, GFP_KERNEL);
if (!opad) {
ret = -ENOMEM;
goto calc_fail;
}
if (authkeylen > bs) {
ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
authkeylen, ipad);
if (ret)
goto calc_fail;
authkeylen = ds;
} else {
memcpy(ipad, ctx->key, authkeylen);
}
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
for (icount = 0; icount < bs; icount++) {
ipad[icount] ^= 0x36;
opad[icount] ^= 0x5c;
}
/*
* Partial Hash calculated from the software
* algorithm is retrieved for IPAD & OPAD
*/
/* IPAD Calculation */
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
if (ret)
goto calc_fail;
/* OPAD Calculation */
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
ret = copy_pad(ctx->mac_type, ctx->opad, opad);
if (ret)
goto calc_fail;
kfree(ipad);
kfree(opad);
return 0;
calc_fail:
kfree(ctx->ipad);
ctx->ipad = NULL;
kfree(ctx->opad);
ctx->opad = NULL;
kfree(ipad);
kfree(opad);
kfree(ctx->sdesc);
ctx->sdesc = NULL;
return ret;
}
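/*
 * aead_hmac_init() above is the standard RFC 2104 precomputation: the
 * (possibly pre-hashed) authentication key is zero-padded to one block,
 * XORed with 0x36/0x5c to form ipad/opad, and one compression round over
 * each is exported so the hardware can continue the inner and outer hashes
 * per request. In rough pseudo form:
 *
 *   ipad = partial_hash(key ^ 0x36...36)
 *   opad = partial_hash(key ^ 0x5c...5c)
 */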
static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
int status = -EINVAL;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
if (keylen > OTX_CPT_MAX_KEY_SIZE)
goto badkey;
authkeylen = keylen - enckeylen;
memcpy(ctx->key, key, keylen);
switch (enckeylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX_CPT_AES_256_BIT;
break;
default:
/* Invalid key length */
goto badkey;
}
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = authkeylen;
status = aead_hmac_init(cipher);
if (status)
goto badkey;
return 0;
badkey:
return status;
}
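/*
 * The authenc() key blob parsed above follows the generic crypto API layout:
 * an rtattr header carrying the big-endian enckeylen, followed by the
 * authentication key and then the encryption key, roughly
 *
 *   [ rtattr CRYPTO_AUTHENC_KEYA_PARAM: enckeylen ][ auth key ][ enc key ]
 *
 * so the authentication key length falls out as keylen - enckeylen once the
 * header has been stripped.
 */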
static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
struct rtattr *rta = (void *)key;
int enckeylen = 0;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (enckeylen != 0)
goto badkey;
if (keylen > OTX_CPT_MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->key, key, keylen);
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = keylen;
return 0;
badkey:
return -EINVAL;
}
static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
/*
* For aes gcm we expect to get encryption key (16, 24, 32 bytes)
* and salt (4 bytes)
*/
switch (keylen) {
case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
ctx->key_type = OTX_CPT_AES_128_BIT;
ctx->enc_key_len = AES_KEYSIZE_128;
break;
case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
ctx->key_type = OTX_CPT_AES_192_BIT;
ctx->enc_key_len = AES_KEYSIZE_192;
break;
case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
ctx->key_type = OTX_CPT_AES_256_BIT;
ctx->enc_key_len = AES_KEYSIZE_256;
break;
default:
/* Invalid key and salt length */
return -EINVAL;
}
/* Store encryption key and salt */
memcpy(ctx->key, key, keylen);
return 0;
}
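/*
 * For rfc4106(gcm(aes)) the last four key bytes are the salt (nonce prefix).
 * The setkey handler above keeps key and salt together in ctx->key;
 * create_aead_ctx_hdr() below copies the salt into the FC context IV field
 * while the 8-byte per-request IV is taken from the request via DPTR.
 */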
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
u32 *argcnt)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
int mac_len = crypto_aead_authsize(tfm);
int ds;
rctx->ctrl_word.e.enc_data_offset = req->assoclen;
switch (ctx->cipher_type) {
case OTX_CPT_AES_CBC:
fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
/* Copy encryption key to context */
memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
ctx->enc_key_len);
/* Copy IV to context */
memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
ds = crypto_shash_digestsize(ctx->hashalg);
if (ctx->mac_type == OTX_CPT_SHA384)
ds = SHA512_DIGEST_SIZE;
if (ctx->ipad)
memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
if (ctx->opad)
memcpy(fctx->hmac.e.opad, ctx->opad, ds);
break;
case OTX_CPT_AES_GCM:
fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
/* Copy encryption key to context */
memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
/* Copy salt to context */
memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
AES_GCM_SALT_SIZE);
rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
break;
default:
/* Unknown cipher type */
return -EINVAL;
}
rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);
req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
if (enc) {
req_info->req.opcode.s.minor = 2;
req_info->req.param1 = req->cryptlen;
req_info->req.param2 = req->cryptlen + req->assoclen;
} else {
req_info->req.opcode.s.minor = 3;
req_info->req.param1 = req->cryptlen - mac_len;
req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
}
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
fctx->enc.enc_ctrl.e.mac_len = mac_len;
fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
/*
* Storing Packet Data Information in offset
* Control Word First 8 bytes
*/
req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
req_info->in[*argcnt].size = CONTROL_WORD_LEN;
req_info->req.dlen += CONTROL_WORD_LEN;
++(*argcnt);
req_info->in[*argcnt].vptr = (u8 *)fctx;
req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
++(*argcnt);
return 0;
}
static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
u32 enc)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
req_info->is_trunc_hmac = ctx->is_trunc_hmac;
req_info->req.opcode.s.minor = 0;
req_info->req.param1 = ctx->auth_key_len;
req_info->req.param2 = ctx->mac_type << 8;
/* Add authentication key */
req_info->in[*argcnt].vptr = ctx->key;
req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
req_info->req.dlen += round_up(ctx->auth_key_len, 8);
++(*argcnt);
return 0;
}
static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen = req->cryptlen + req->assoclen;
u32 status, argcnt = 0;
status = create_aead_ctx_hdr(req, enc, &argcnt);
if (status)
return status;
update_input_data(req_info, req->src, inputlen, &argcnt);
req_info->incnt = argcnt;
return 0;
}
static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
u32 mac_len)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 argcnt = 0, outputlen = 0;
if (enc)
outputlen = req->cryptlen + req->assoclen + mac_len;
else
outputlen = req->cryptlen + req->assoclen - mac_len;
update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
req_info->outcnt = argcnt;
return 0;
}
static inline u32 create_aead_null_input_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
u32 inputlen, argcnt = 0;
if (enc)
inputlen = req->cryptlen + req->assoclen;
else
inputlen = req->cryptlen + req->assoclen - mac_len;
create_hmac_ctx_hdr(req, &argcnt, enc);
update_input_data(req_info, req->src, inputlen, &argcnt);
req_info->incnt = argcnt;
return 0;
}
static inline u32 create_aead_null_output_list(struct aead_request *req,
u32 enc, u32 mac_len)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct scatterlist *dst;
u8 *ptr = NULL;
int argcnt = 0, status, offset;
u32 inputlen;
if (enc)
inputlen = req->cryptlen + req->assoclen;
else
inputlen = req->cryptlen + req->assoclen - mac_len;
/*
* If source and destination are different
* then copy payload to destination
*/
if (req->src != req->dst) {
ptr = kmalloc(inputlen, (req_info->areq->flags &
CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!ptr) {
status = -ENOMEM;
goto error;
}
status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
inputlen);
if (status != inputlen) {
status = -EINVAL;
goto error_free;
}
status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
inputlen);
if (status != inputlen) {
status = -EINVAL;
goto error_free;
}
kfree(ptr);
}
if (enc) {
/*
* In an encryption scenario hmac needs
* to be appended after payload
*/
dst = req->dst;
offset = inputlen;
while (offset >= dst->length) {
offset -= dst->length;
dst = sg_next(dst);
if (!dst) {
status = -ENOENT;
goto error;
}
}
update_output_data(req_info, dst, offset, mac_len, &argcnt);
} else {
/*
* In a decryption scenario the hmac calculated over the
* received payload needs to be compared with the received hmac
*/
status = sg_copy_buffer(req->src, sg_nents(req->src),
rctx->fctx.hmac.s.hmac_recv, mac_len,
inputlen, true);
if (status != mac_len) {
status = -EINVAL;
goto error;
}
req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
req_info->out[argcnt].size = mac_len;
argcnt++;
}
req_info->outcnt = argcnt;
return 0;
error_free:
kfree(ptr);
error:
return status;
}
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
struct otx_cpt_req_info *req_info = &rctx->cpt_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct pci_dev *pdev;
u32 status, cpu_num;
/* Clear control words */
rctx->ctrl_word.flags = 0;
rctx->fctx.enc.enc_ctrl.flags = 0;
req_info->callback = otx_cpt_aead_callback;
req_info->areq = &req->base;
req_info->req_type = reg_type;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
switch (reg_type) {
case OTX_CPT_AEAD_ENC_DEC_REQ:
status = create_aead_input_list(req, enc);
if (status)
return status;
status = create_aead_output_list(req, enc,
crypto_aead_authsize(tfm));
if (status)
return status;
break;
case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
status = create_aead_null_input_list(req, enc,
crypto_aead_authsize(tfm));
if (status)
return status;
status = create_aead_null_output_list(req, enc,
crypto_aead_authsize(tfm));
if (status)
return status;
break;
default:
return -EINVAL;
}
/* Validate that request doesn't exceed maximum CPT supported size */
if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
return -E2BIG;
status = get_se_device(&pdev, &cpu_num);
if (status)
return status;
req_info->ctrl.s.grp = 0;
status = otx_cpt_do_request(pdev, req_info, cpu_num);
/*
* We perform an asynchronous send; once the request
* completes, the driver notifies the caller through the
* registered callback functions.
*/
return status;
}
static int otx_cpt_aead_encrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}
static int otx_cpt_aead_decrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}
static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}
static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}
static struct skcipher_alg otx_cpt_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "cpt_xts_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx_cpt_enc_dec_init,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.setkey = otx_cpt_skcipher_xts_setkey,
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cpt_cbc_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx_cpt_enc_dec_init,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = otx_cpt_skcipher_cbc_aes_setkey,
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "cpt_ecb_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx_cpt_enc_dec_init,
.ivsize = 0,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = otx_cpt_skcipher_ecb_aes_setkey,
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "cpt_cfb_aes",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx_cpt_enc_dec_init,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = otx_cpt_skcipher_cfb_aes_setkey,
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cpt_cbc_des3_ede",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx_cpt_enc_dec_init,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = otx_cpt_skcipher_cbc_des3_setkey,
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "cpt_ecb_des3_ede",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
.base.cra_alignmask = 7,
.base.cra_priority = 4001,
.base.cra_module = THIS_MODULE,
.init = otx_cpt_enc_dec_init,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = 0,
.setkey = otx_cpt_skcipher_ecb_des3_setkey,
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
} };
static struct aead_alg otx_cpt_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_cbc_aes_sha1_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_encrypt,
.decrypt = otx_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_cbc_aes_sha256_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_encrypt,
.decrypt = otx_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_cbc_aes_sha384_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_encrypt,
.decrypt = otx_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_cbc_aes_sha512_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_encrypt,
.decrypt = otx_cpt_aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha1_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_ecb_null_sha1_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_null_encrypt,
.decrypt = otx_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA1_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha256_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_ecb_null_sha256_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_null_encrypt,
.decrypt = otx_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA256_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha384_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_ecb_null_sha384_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_null_encrypt,
.decrypt = otx_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA384_DIGEST_SIZE,
}, {
.base = {
.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha512_ecb_null",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_ecb_null_sha512_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_ecb_null_sha_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_null_encrypt,
.decrypt = otx_cpt_aead_null_decrypt,
.ivsize = 0,
.maxauthsize = SHA512_DIGEST_SIZE,
}, {
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "cpt_rfc4106_gcm_aes",
.cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
.cra_priority = 4001,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = otx_cpt_aead_gcm_aes_init,
.exit = otx_cpt_aead_exit,
.setkey = otx_cpt_aead_gcm_aes_setkey,
.setauthsize = otx_cpt_aead_set_authsize,
.encrypt = otx_cpt_aead_encrypt,
.decrypt = otx_cpt_aead_decrypt,
.ivsize = AES_GCM_IV_SIZE,
.maxauthsize = AES_GCM_ICV_SIZE,
} };
static inline int is_any_alg_used(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
return true;
for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
return true;
return false;
}
static inline int cpt_register_algs(void)
{
int i, err = 0;
if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_register_skciphers(otx_cpt_skciphers,
ARRAY_SIZE(otx_cpt_skciphers));
if (err)
return err;
}
for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
if (err) {
crypto_unregister_skciphers(otx_cpt_skciphers,
ARRAY_SIZE(otx_cpt_skciphers));
return err;
}
return 0;
}
static inline void cpt_unregister_algs(void)
{
crypto_unregister_skciphers(otx_cpt_skciphers,
ARRAY_SIZE(otx_cpt_skciphers));
crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}
static int compare_func(const void *lptr, const void *rptr)
{
struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
if (ldesc->dev->devfn < rdesc->dev->devfn)
return -1;
if (ldesc->dev->devfn > rdesc->dev->devfn)
return 1;
return 0;
}
static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
swap(*ldesc, *rdesc);
}
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
enum otx_cptpf_type pf_type,
enum otx_cptvf_type engine_type,
int num_queues, int num_devices)
{
int ret = 0;
int count;
mutex_lock(&mutex);
switch (engine_type) {
case OTX_CPT_SE_TYPES:
count = atomic_read(&se_devices.count);
if (count >= CPT_MAX_VF_NUM) {
dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto err;
}
se_devices.desc[count].pf_type = pf_type;
se_devices.desc[count].num_queues = num_queues;
se_devices.desc[count++].dev = pdev;
atomic_inc(&se_devices.count);
if (atomic_read(&se_devices.count) == num_devices &&
is_crypto_registered == false) {
if (cpt_register_algs()) {
dev_err(&pdev->dev,
"Error in registering crypto algorithms\n");
ret = -EINVAL;
goto err;
}
try_module_get(mod);
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
compare_func, swap_func);
break;
case OTX_CPT_AE_TYPES:
count = atomic_read(&ae_devices.count);
if (count >= CPT_MAX_VF_NUM) {
dev_err(&pdev->dev, "No space to a add new device\n");
ret = -ENOSPC;
goto err;
}
ae_devices.desc[count].pf_type = pf_type;
ae_devices.desc[count].num_queues = num_queues;
ae_devices.desc[count++].dev = pdev;
atomic_inc(&ae_devices.count);
sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
compare_func, swap_func);
break;
default:
dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
ret = BAD_OTX_CPTVF_TYPE;
}
err:
mutex_unlock(&mutex);
return ret;
}
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
enum otx_cptvf_type engine_type)
{
struct cpt_device_table *dev_tbl;
bool dev_found = false;
int i, j, count;
mutex_lock(&mutex);
dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
count = atomic_read(&dev_tbl->count);
for (i = 0; i < count; i++)
if (pdev == dev_tbl->desc[i].dev) {
for (j = i; j < count-1; j++)
dev_tbl->desc[j] = dev_tbl->desc[j+1];
dev_found = true;
break;
}
if (!dev_found) {
dev_err(&pdev->dev, "%s device not found\n", __func__);
goto exit;
}
if (engine_type != OTX_CPT_AE_TYPES) {
if (atomic_dec_and_test(&se_devices.count) &&
!is_any_alg_used()) {
cpt_unregister_algs();
module_put(mod);
is_crypto_registered = false;
}
} else
atomic_dec(&ae_devices.count);
exit:
mutex_unlock(&mutex);
}
| linux-master | drivers/crypto/marvell/octeontx/otx_cptvf_algs.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "otx_cpt_common.h"
#include "otx_cptpf.h"
static char *get_mbox_opcode_str(int msg_opcode)
{
char *str = "Unknown";
switch (msg_opcode) {
case OTX_CPT_MSG_VF_UP:
str = "UP";
break;
case OTX_CPT_MSG_VF_DOWN:
str = "DOWN";
break;
case OTX_CPT_MSG_READY:
str = "READY";
break;
case OTX_CPT_MSG_QLEN:
str = "QLEN";
break;
case OTX_CPT_MSG_QBIND_GRP:
str = "QBIND_GRP";
break;
case OTX_CPT_MSG_VQ_PRIORITY:
str = "VQ_PRIORITY";
break;
case OTX_CPT_MSG_PF_TYPE:
str = "PF_TYPE";
break;
case OTX_CPT_MSG_ACK:
str = "ACK";
break;
case OTX_CPT_MSG_NACK:
str = "NACK";
break;
}
return str;
}
static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
{
char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];
hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
if (vf_id >= 0)
pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), vf_id,
raw_data_str);
else
pr_debug("MBOX opcode %s received from PF raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
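/*
 * PF -> VF mailbox write. The data word is written first and the message
 * word last: writing MBOX(0) is what raises the interrupt on the VF side,
 * so the payload must already be in place when the VF starts processing.
 */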
static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf,
struct otx_cpt_mbox *mbx)
{
/* Writing mbox(0) causes interrupt */
writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
}
/*
* ACK the VF's mailbox message
* @vf: VF to which the ACK is to be sent
*/
static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf,
struct otx_cpt_mbox *mbx)
{
mbx->data = 0ull;
mbx->msg = OTX_CPT_MSG_ACK;
otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}
/* NACK the VF's mailbox message to indicate that the PF cannot complete the action */
static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf,
struct otx_cpt_mbox *mbx)
{
mbx->data = 0ull;
mbx->msg = OTX_CPT_MSG_NACK;
otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}
static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf)
{
/* W1C for the VF */
writeq(1ull << vf, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
}
/*
* Configure QLEN/Chunk sizes for VF
*/
static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf,
u32 size)
{
union otx_cptx_pf_qx_ctl pf_qx_ctl;
pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
pf_qx_ctl.s.size = size;
pf_qx_ctl.s.cont_err = true;
writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}
/*
* Configure VQ priority
*/
static void otx_cpt_cfg_vq_priority(struct otx_cpt_device *cpt, int vf, u32 pri)
{
union otx_cptx_pf_qx_ctl pf_qx_ctl;
pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
pf_qx_ctl.s.pri = pri;
writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}
static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
{
struct device *dev = &cpt->pdev->dev;
struct otx_cpt_eng_grp_info *eng_grp;
union otx_cptx_pf_qx_ctl pf_qx_ctl;
struct otx_cpt_ucode *ucode;
if (q >= cpt->max_vfs) {
dev_err(dev, "Requested queue %d is > than maximum avail %d\n",
q, cpt->max_vfs);
return -EINVAL;
}
if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Requested group %d is > than maximum avail %d\n",
grp, OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
eng_grp = &cpt->eng_grps.grp[grp];
if (!eng_grp->is_enabled) {
dev_err(dev, "Requested engine group %d is disabled\n", grp);
return -EINVAL;
}
pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
pf_qx_ctl.s.grp = grp;
writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
if (eng_grp->mirror.is_ena)
ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
else
ucode = &eng_grp->ucode[0];
if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_SE_TYPES))
return OTX_CPT_SE_TYPES;
else if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_AE_TYPES))
return OTX_CPT_AE_TYPES;
else
return BAD_OTX_CPTVF_TYPE;
}
/* Interrupt handler to handle mailbox messages from VFs */
static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
{
int vftype = 0;
struct otx_cpt_mbox mbx = {};
struct device *dev = &cpt->pdev->dev;
/*
* MBOX[0] contains msg
* MBOX[1] contains data
*/
mbx.msg = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
mbx.data = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
dump_mbox_msg(&mbx, vf);
switch (mbx.msg) {
case OTX_CPT_MSG_VF_UP:
mbx.msg = OTX_CPT_MSG_VF_UP;
mbx.data = cpt->vfs_enabled;
otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
break;
case OTX_CPT_MSG_READY:
mbx.msg = OTX_CPT_MSG_READY;
mbx.data = vf;
otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
break;
case OTX_CPT_MSG_VF_DOWN:
/* First msg in VF teardown sequence */
otx_cpt_mbox_send_ack(cpt, vf, &mbx);
break;
case OTX_CPT_MSG_QLEN:
otx_cpt_cfg_qlen_for_vf(cpt, vf, mbx.data);
otx_cpt_mbox_send_ack(cpt, vf, &mbx);
break;
case OTX_CPT_MSG_QBIND_GRP:
vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
if ((vftype != OTX_CPT_AE_TYPES) &&
(vftype != OTX_CPT_SE_TYPES)) {
dev_err(dev, "VF%d binding to eng group %llu failed\n",
vf, mbx.data);
otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
} else {
mbx.msg = OTX_CPT_MSG_QBIND_GRP;
mbx.data = vftype;
otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
}
break;
case OTX_CPT_MSG_PF_TYPE:
mbx.msg = OTX_CPT_MSG_PF_TYPE;
mbx.data = cpt->pf_type;
otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
break;
case OTX_CPT_MSG_VQ_PRIORITY:
otx_cpt_cfg_vq_priority(cpt, vf, mbx.data);
otx_cpt_mbox_send_ack(cpt, vf, &mbx);
break;
default:
dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
vf, mbx.msg);
break;
}
}
void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx)
{
u64 intr;
u8 vf;
intr = readq(cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
pr_debug("PF interrupt mbox%d mask 0x%llx\n", mbx, intr);
for (vf = 0; vf < cpt->max_vfs; vf++) {
if (intr & (1ULL << vf)) {
otx_cpt_handle_mbox_intr(cpt, vf);
otx_cpt_clear_mbox_intr(cpt, vf);
}
}
}
| linux-master | drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
/* Completion code size and initial value */
#define COMPLETION_CODE_SIZE 8
#define COMPLETION_CODE_INIT 0
/* SG list header size in bytes */
#define SG_LIST_HDR_SIZE 8
/* Default timeout when waiting for free pending entry in us */
#define CPT_PENTRY_TIMEOUT 1000
#define CPT_PENTRY_STEP 50
/* Default threshold for stopping and resuming sender requests */
#define CPT_IQ_STOP_MARGIN 128
#define CPT_IQ_RESUME_MARGIN 512
#define CPT_DMA_ALIGN 128
void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req)
{
int i;
pr_debug("Gather list size %d\n", req->incnt);
for (i = 0; i < req->incnt; i++) {
pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
req->in[i].size, req->in[i].vptr,
(void *) req->in[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n",
req->in[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->in[i].vptr, req->in[i].size, false);
}
pr_debug("Scatter list size %d\n", req->outcnt);
for (i = 0; i < req->outcnt; i++) {
pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
req->out[i].size, req->out[i].vptr,
(void *) req->out[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->out[i].vptr, req->out[i].size, false);
}
}
static inline struct otx_cpt_pending_entry *get_free_pending_entry(
struct otx_cpt_pending_queue *q,
int qlen)
{
struct otx_cpt_pending_entry *ent = NULL;
ent = &q->head[q->rear];
if (unlikely(ent->busy))
return NULL;
q->rear++;
if (unlikely(q->rear == qlen))
q->rear = 0;
return ent;
}
static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
{
if (WARN_ON(inc > length))
inc = length;
index += inc;
if (unlikely(index >= length))
index -= length;
return index;
}
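/*
 * Illustrative example (queue length assumed): with a pending queue length
 * of 1024, modulo_inc(1000, 1024, 512) yields 1000 + 512 - 1024 = 488,
 * i.e. the index wraps around the circular queue instead of running past
 * its end.
 */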
static inline void free_pentry(struct otx_cpt_pending_entry *pentry)
{
pentry->completion_addr = NULL;
pentry->info = NULL;
pentry->callback = NULL;
pentry->areq = NULL;
pentry->resume_sender = false;
pentry->busy = false;
}
static inline int setup_sgio_components(struct pci_dev *pdev,
struct otx_cpt_buf_ptr *list,
int buf_count, u8 *buffer)
{
struct otx_cpt_sglist_component *sg_ptr = NULL;
int ret = 0, i, j;
int components;
if (unlikely(!list)) {
dev_err(&pdev->dev, "Input list pointer is NULL\n");
return -EFAULT;
}
for (i = 0; i < buf_count; i++) {
if (likely(list[i].vptr)) {
list[i].dma_addr = dma_map_single(&pdev->dev,
list[i].vptr,
list[i].size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&pdev->dev,
list[i].dma_addr))) {
dev_err(&pdev->dev, "Dma mapping failed\n");
ret = -EIO;
goto sg_cleanup;
}
}
}
components = buf_count / 4;
sg_ptr = (struct otx_cpt_sglist_component *)buffer;
for (i = 0; i < components; i++) {
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
sg_ptr++;
}
components = buf_count % 4;
switch (components) {
case 3:
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
fallthrough;
case 2:
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
fallthrough;
case 1:
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
break;
default:
break;
}
return ret;
sg_cleanup:
for (j = 0; j < i; j++) {
if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
}
list[j].dma_addr = 0;
}
return ret;
}
static inline int setup_sgio_list(struct pci_dev *pdev,
struct otx_cpt_info_buffer **pinfo,
struct otx_cpt_req_info *req, gfp_t gfp)
{
u32 dlen, align_dlen, info_len, rlen;
struct otx_cpt_info_buffer *info;
u16 g_sz_bytes, s_sz_bytes;
int align = CPT_DMA_ALIGN;
u32 total_mem_len;
if (unlikely(req->incnt > OTX_CPT_MAX_SG_IN_CNT ||
req->outcnt > OTX_CPT_MAX_SG_OUT_CNT)) {
dev_err(&pdev->dev, "Error too many sg components\n");
return -EINVAL;
}
g_sz_bytes = ((req->incnt + 3) / 4) *
sizeof(struct otx_cpt_sglist_component);
s_sz_bytes = ((req->outcnt + 3) / 4) *
sizeof(struct otx_cpt_sglist_component);
dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
align_dlen = ALIGN(dlen, align);
info_len = ALIGN(sizeof(*info), align);
rlen = ALIGN(sizeof(union otx_cpt_res_s), align);
total_mem_len = align_dlen + info_len + rlen + COMPLETION_CODE_SIZE;
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
}
*pinfo = info;
info->dlen = dlen;
info->in_buffer = (u8 *)info + info_len;
((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
((u16 *)info->in_buffer)[2] = 0;
((u16 *)info->in_buffer)[3] = 0;
/* Setup gather (input) components */
if (setup_sgio_components(pdev, req->in, req->incnt,
&info->in_buffer[8])) {
dev_err(&pdev->dev, "Failed to setup gather list\n");
return -EFAULT;
}
if (setup_sgio_components(pdev, req->out, req->outcnt,
&info->in_buffer[8 + g_sz_bytes])) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
return -EFAULT;
}
info->dma_len = total_mem_len - info_len;
info->dptr_baddr = dma_map_single(&pdev->dev, (void *)info->in_buffer,
info->dma_len, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
return -EIO;
}
/*
* Get buffer for union otx_cpt_res_s response
* structure and its physical address
*/
info->completion_addr = (u64 *)(info->in_buffer + align_dlen);
info->comp_baddr = info->dptr_baddr + align_dlen;
/* Create and initialize RPTR */
info->out_buffer = (u8 *)info->completion_addr + rlen;
info->rptr_baddr = info->comp_baddr + rlen;
*((u64 *) info->out_buffer) = ~((u64) COMPLETION_CODE_INIT);
return 0;
}
static void cpt_fill_inst(union otx_cpt_inst_s *inst,
struct otx_cpt_info_buffer *info,
struct otx_cpt_iq_cmd *cmd)
{
inst->u[0] = 0x0;
inst->s.doneint = true;
inst->s.res_addr = (u64)info->comp_baddr;
inst->u[2] = 0x0;
inst->s.wq_ptr = 0;
inst->s.ei0 = cmd->cmd.u64;
inst->s.ei1 = cmd->dptr;
inst->s.ei2 = cmd->rptr;
inst->s.ei3 = cmd->cptr.u64;
}
/*
 * On the OcteonTX platform the parameter db_count is used as a count for
 * ringing the doorbell. The valid values for db_count are:
 * 0 - 1 CPT instruction will be enqueued, however CPT will not be informed
* 1 - 1 CPT instruction will be enqueued and CPT will be informed
*/
static void cpt_send_cmd(union otx_cpt_inst_s *cptinst, struct otx_cptvf *cptvf)
{
struct otx_cpt_cmd_qinfo *qinfo = &cptvf->cqinfo;
struct otx_cpt_cmd_queue *queue;
struct otx_cpt_cmd_chunk *curr;
u8 *ent;
queue = &qinfo->queue[0];
/*
	 * cpt_send_cmd is currently called only from a critical section,
	 * therefore no locking is required for accessing the instruction queue
*/
ent = &queue->qhead->head[queue->idx * OTX_CPT_INST_SIZE];
memcpy(ent, (void *) cptinst, OTX_CPT_INST_SIZE);
if (++queue->idx >= queue->qhead->size / 64) {
curr = queue->qhead;
if (list_is_last(&curr->nextchunk, &queue->chead))
queue->qhead = queue->base;
else
queue->qhead = list_next_entry(queue->qhead, nextchunk);
queue->idx = 0;
}
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
otx_cptvf_write_vq_doorbell(cptvf, 1);
}
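/*
 * Illustrative arithmetic (the 8 KB chunk size is an assumption): with the
 * 64-byte instruction size implied by the division above, an 8192-byte chunk
 * holds 8192 / 64 = 128 instructions, so queue->idx moves on to the next
 * chunk after the 128th write. The otx_cptvf_write_vq_doorbell(cptvf, 1)
 * call then tells the CPT that one new instruction is available, matching
 * db_count = 1 as described before cpt_send_cmd().
 */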
static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
struct otx_cpt_pending_queue *pqueue,
struct otx_cptvf *cptvf)
{
struct otx_cptvf_request *cpt_req = &req->req;
struct otx_cpt_pending_entry *pentry = NULL;
union otx_cpt_ctrl_info *ctrl = &req->ctrl;
struct otx_cpt_info_buffer *info = NULL;
union otx_cpt_res_s *result = NULL;
struct otx_cpt_iq_cmd iq_cmd;
union otx_cpt_inst_s cptinst;
int retry, ret = 0;
u8 resume_sender;
gfp_t gfp;
gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
ret = setup_sgio_list(pdev, &info, req, gfp);
if (unlikely(ret)) {
dev_err(&pdev->dev, "Setting up SG list failed\n");
goto request_cleanup;
}
cpt_req->dlen = info->dlen;
result = (union otx_cpt_res_s *) info->completion_addr;
result->s.compcode = COMPLETION_CODE_INIT;
spin_lock_bh(&pqueue->lock);
pentry = get_free_pending_entry(pqueue, pqueue->qlen);
retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
while (unlikely(!pentry) && retry--) {
spin_unlock_bh(&pqueue->lock);
udelay(CPT_PENTRY_STEP);
spin_lock_bh(&pqueue->lock);
pentry = get_free_pending_entry(pqueue, pqueue->qlen);
}
if (unlikely(!pentry)) {
ret = -ENOSPC;
spin_unlock_bh(&pqueue->lock);
goto request_cleanup;
}
/*
	 * Check if we are close to filling the entire pending queue;
	 * if so, tell the sender to stop/sleep by returning -EBUSY.
	 * We do this only for contexts which can sleep (GFP_KERNEL)
*/
if (gfp == GFP_KERNEL &&
pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
pentry->resume_sender = true;
} else
pentry->resume_sender = false;
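	/*
	 * Illustrative example (queue length assumed): with a pending queue
	 * length of 2048 and CPT_IQ_STOP_MARGIN = 128, resume_sender is set
	 * once more than 2048 - 128 = 1920 entries are in flight, so a
	 * sleeping caller is returned -EBUSY below and backs off before the
	 * queue fills up completely.
	 */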
resume_sender = pentry->resume_sender;
pqueue->pending_count++;
pentry->completion_addr = info->completion_addr;
pentry->info = info;
pentry->callback = req->callback;
pentry->areq = req->areq;
pentry->busy = true;
info->pentry = pentry;
info->time_in = jiffies;
info->req = req;
/* Fill in the command */
iq_cmd.cmd.u64 = 0;
iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
iq_cmd.dptr = info->dptr_baddr;
iq_cmd.rptr = info->rptr_baddr;
iq_cmd.cptr.u64 = 0;
iq_cmd.cptr.s.grp = ctrl->s.grp;
/* Fill in the CPT_INST_S type command for HW interpretation */
cpt_fill_inst(&cptinst, info, &iq_cmd);
/* Print debug info if enabled */
otx_cpt_dump_sg_list(pdev, req);
pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX_CPT_INST_SIZE);
print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX_CPT_INST_SIZE, false);
pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
cpt_req->dlen, false);
/* Send CPT command */
cpt_send_cmd(&cptinst, cptvf);
/*
	 * We allocate and prepare the pending queue entry in a critical
	 * section together with submitting the CPT instruction to the CPT
	 * instruction queue, to make sure that the order of CPT requests is
	 * the same in both the pending and instruction queues
*/
spin_unlock_bh(&pqueue->lock);
ret = resume_sender ? -EBUSY : -EINPROGRESS;
return ret;
request_cleanup:
do_request_cleanup(pdev, info);
return ret;
}
int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
int cpu_num)
{
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!otx_cpt_device_ready(cptvf)) {
dev_err(&pdev->dev, "CPT Device is not ready\n");
return -ENODEV;
}
if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
cptvf->vfid);
return -EINVAL;
} else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
(req->ctrl.s.se_req)) {
dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
cptvf->vfid);
return -EINVAL;
}
return process_request(pdev, req, &cptvf->pqinfo.queue[0], cptvf);
}
static int cpt_process_ccode(struct pci_dev *pdev,
union otx_cpt_res_s *cpt_status,
struct otx_cpt_info_buffer *cpt_info,
struct otx_cpt_req_info *req, u32 *res_code)
{
u8 ccode = cpt_status->s.compcode;
union otx_cpt_error_code ecode;
ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer);
switch (ccode) {
case CPT_COMP_E_FAULT:
dev_err(&pdev->dev,
"Request failed with DMA fault\n");
otx_cpt_dump_sg_list(pdev, req);
break;
case CPT_COMP_E_SWERR:
dev_err(&pdev->dev,
"Request failed with software error code %d\n",
ecode.s.ccode);
otx_cpt_dump_sg_list(pdev, req);
break;
case CPT_COMP_E_HWERR:
dev_err(&pdev->dev,
"Request failed with hardware error\n");
otx_cpt_dump_sg_list(pdev, req);
break;
case COMPLETION_CODE_INIT:
/* check for timeout */
if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;
}
return 1;
case CPT_COMP_E_GOOD:
/* Check microcode completion code */
if (ecode.s.ccode) {
/*
			 * If the requested hmac is truncated and the ucode
			 * returns an s/g write length error, then we report
			 * success because the ucode writes as many bytes of
			 * the calculated hmac as fit in the gather buffer and
			 * reports an s/g write length error if the number of
			 * bytes in the gather buffer is less than the full
			 * hmac size.
*/
if (req->is_trunc_hmac &&
ecode.s.ccode == ERR_SCATTER_GATHER_WRITE_LENGTH) {
*res_code = 0;
break;
}
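			/*
			 * Illustrative example (the 12-byte tag size is an
			 * assumption, not taken from this driver): an
			 * hmac(sha1) request truncated to a 12-byte tag only
			 * provides room for 12 of the 20 digest bytes, so the
			 * ucode reports ERR_SCATTER_GATHER_WRITE_LENGTH even
			 * though the truncated tag was written, and the check
			 * above turns that into success.
			 */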
dev_err(&pdev->dev,
"Request failed with software error code 0x%x\n",
ecode.s.ccode);
otx_cpt_dump_sg_list(pdev, req);
break;
}
/* Request has been processed with success */
*res_code = 0;
break;
default:
dev_err(&pdev->dev, "Request returned invalid status\n");
break;
}
return 0;
}
static inline void process_pending_queue(struct pci_dev *pdev,
struct otx_cpt_pending_queue *pqueue)
{
void (*callback)(int status, void *arg1, void *arg2);
struct otx_cpt_pending_entry *resume_pentry = NULL;
struct otx_cpt_pending_entry *pentry = NULL;
struct otx_cpt_info_buffer *cpt_info = NULL;
union otx_cpt_res_s *cpt_status = NULL;
struct otx_cpt_req_info *req = NULL;
struct crypto_async_request *areq;
u32 res_code, resume_index;
while (1) {
spin_lock_bh(&pqueue->lock);
pentry = &pqueue->head[pqueue->front];
if (WARN_ON(!pentry)) {
spin_unlock_bh(&pqueue->lock);
break;
}
res_code = -EINVAL;
if (unlikely(!pentry->busy)) {
spin_unlock_bh(&pqueue->lock);
break;
}
if (unlikely(!pentry->callback)) {
dev_err(&pdev->dev, "Callback NULL\n");
goto process_pentry;
}
cpt_info = pentry->info;
if (unlikely(!cpt_info)) {
dev_err(&pdev->dev, "Pending entry post arg NULL\n");
goto process_pentry;
}
req = cpt_info->req;
if (unlikely(!req)) {
dev_err(&pdev->dev, "Request NULL\n");
goto process_pentry;
}
cpt_status = (union otx_cpt_res_s *) pentry->completion_addr;
if (unlikely(!cpt_status)) {
dev_err(&pdev->dev, "Completion address NULL\n");
goto process_pentry;
}
if (cpt_process_ccode(pdev, cpt_status, cpt_info, req,
&res_code)) {
spin_unlock_bh(&pqueue->lock);
return;
}
cpt_info->pdev = pdev;
process_pentry:
/*
		 * Check if we should inform the sending side to resume;
		 * we do it CPT_IQ_RESUME_MARGIN elements in advance, before
		 * the pending queue becomes empty
*/
resume_index = modulo_inc(pqueue->front, pqueue->qlen,
CPT_IQ_RESUME_MARGIN);
resume_pentry = &pqueue->head[resume_index];
if (resume_pentry &&
resume_pentry->resume_sender) {
resume_pentry->resume_sender = false;
callback = resume_pentry->callback;
areq = resume_pentry->areq;
if (callback) {
spin_unlock_bh(&pqueue->lock);
/*
* EINPROGRESS is an indication for sending
* side that it can resume sending requests
*/
callback(-EINPROGRESS, areq, cpt_info);
spin_lock_bh(&pqueue->lock);
}
}
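		/*
		 * Illustrative example (front and qlen assumed): with
		 * front = 1900, qlen = 2048 and CPT_IQ_RESUME_MARGIN = 512,
		 * resume_index = modulo_inc(1900, 2048, 512) = 364, i.e. the
		 * sender that was stopped at that entry is woken well before
		 * the pending queue drains completely.
		 */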
callback = pentry->callback;
areq = pentry->areq;
free_pentry(pentry);
pqueue->pending_count--;
pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
spin_unlock_bh(&pqueue->lock);
/*
* Call callback after current pending entry has been
* processed, we don't do it if the callback pointer is
* invalid.
*/
if (callback)
callback(res_code, areq, cpt_info);
}
}
void otx_cpt_post_process(struct otx_cptvf_wqe *wqe)
{
process_pending_queue(wqe->cptvf->pdev, &wqe->cptvf->pqinfo.queue[0]);
}
| linux-master | drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic PowerPC 44x RNG driver
*
* Copyright 2011 IBM Corporation
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include "crypto4xx_core.h"
#include "crypto4xx_trng.h"
#include "crypto4xx_reg_def.h"
#define PPC4XX_TRNG_CTRL 0x0008
#define PPC4XX_TRNG_CTRL_DALM 0x20
#define PPC4XX_TRNG_STAT 0x0004
#define PPC4XX_TRNG_STAT_B 0x1
#define PPC4XX_TRNG_DATA 0x0000
static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
{
struct crypto4xx_device *dev = (void *)rng->priv;
int busy, i, present = 0;
for (i = 0; i < 20; i++) {
busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
PPC4XX_TRNG_STAT_B);
if (!busy || !wait) {
present = 1;
break;
}
udelay(10);
}
return present;
}
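/*
 * For reference: the polling loop above waits at most 20 * 10 us = 200 us
 * for the busy bit to clear before giving up, so a caller passing wait = 1
 * is bounded in how long it can block.
 */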
static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
{
struct crypto4xx_device *dev = (void *)rng->priv;
*data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
return 4;
}
static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
{
u32 device_ctrl;
device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
if (enable)
device_ctrl |= PPC4XX_TRNG_EN;
else
device_ctrl &= ~PPC4XX_TRNG_EN;
writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
}
static const struct of_device_id ppc4xx_trng_match[] = {
{ .compatible = "ppc4xx-rng", },
{ .compatible = "amcc,ppc460ex-rng", },
{ .compatible = "amcc,ppc440epx-rng", },
{},
};
void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
{
struct crypto4xx_device *dev = core_dev->dev;
struct device_node *trng = NULL;
struct hwrng *rng = NULL;
int err;
/* Find the TRNG device node and map it */
trng = of_find_matching_node(NULL, ppc4xx_trng_match);
if (!trng || !of_device_is_available(trng)) {
of_node_put(trng);
return;
}
dev->trng_base = of_iomap(trng, 0);
of_node_put(trng);
if (!dev->trng_base)
goto err_out;
rng = kzalloc(sizeof(*rng), GFP_KERNEL);
if (!rng)
goto err_out;
rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
core_dev->trng = rng;
ppc4xx_trng_enable(dev, true);
out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
err = devm_hwrng_register(core_dev->device, core_dev->trng);
if (err) {
ppc4xx_trng_enable(dev, false);
dev_err(core_dev->device, "failed to register hwrng (%d).\n",
err);
goto err_out;
}
return;
err_out:
iounmap(dev->trng_base);
kfree(rng);
dev->trng_base = NULL;
core_dev->trng = NULL;
}
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
{
if (core_dev && core_dev->trng) {
struct crypto4xx_device *dev = core_dev->dev;
devm_hwrng_unregister(core_dev->device, core_dev->trng);
ppc4xx_trng_enable(dev, false);
iounmap(dev->trng_base);
kfree(core_dev->trng);
}
}
MODULE_ALIAS("ppc4xx_rng");
| linux-master | drivers/crypto/amcc/crypto4xx_trng.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
* All rights reserved. James Hsiao <jhsiao@amcc.com>
*
* This file implements the Linux crypto algorithms.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/hash.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/ctr.h>
#include <crypto/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv,
u32 hdr_proc, u32 h, u32 c, u32 pad_type,
u32 op_grp, u32 op, u32 dir)
{
sa->sa_command_0.w = 0;
sa->sa_command_0.bf.save_hash_state = save_h;
sa->sa_command_0.bf.save_iv = save_iv;
sa->sa_command_0.bf.load_hash_state = ld_h;
sa->sa_command_0.bf.load_iv = ld_iv;
sa->sa_command_0.bf.hdr_proc = hdr_proc;
sa->sa_command_0.bf.hash_alg = h;
sa->sa_command_0.bf.cipher_alg = c;
sa->sa_command_0.bf.pad_type = pad_type & 3;
sa->sa_command_0.bf.extend_pad = pad_type >> 2;
sa->sa_command_0.bf.op_group = op_grp;
sa->sa_command_0.bf.opcode = op;
sa->sa_command_0.bf.dir = dir;
}
static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
u32 hmac_mc, u32 cfb, u32 esn,
u32 sn_mask, u32 mute, u32 cp_pad,
u32 cp_pay, u32 cp_hdr)
{
sa->sa_command_1.w = 0;
sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
sa->sa_command_1.bf.feedback_mode = cfb;
sa->sa_command_1.bf.sa_rev = 1;
sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;
sa->sa_command_1.bf.copy_pad = cp_pad;
sa->sa_command_1.bf.copy_payload = cp_pay;
sa->sa_command_1.bf.copy_hdr = cp_hdr;
}
static inline int crypto4xx_crypt(struct skcipher_request *req,
const unsigned int ivlen, bool decrypt,
bool check_blocksize)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE];
if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
ctx->sa_len, 0, NULL);
}
int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
{
return crypto4xx_crypt(req, 0, false, true);
}
int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
{
return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
}
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
{
return crypto4xx_crypt(req, 0, true, true);
}
int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
{
return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
}
int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
{
return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
}
int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
{
return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
/*
* AES Functions
*/
static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen,
unsigned char cm,
u8 fb)
{
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
struct dynamic_sa_ctl *sa;
int rc;
if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_128)
return -EINVAL;
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
if (rc)
return rc;
/* Setup SA */
sa = ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
SA_NOT_SAVE_IV : SA_SAVE_IV),
SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
DIR_INBOUND);
set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
fb, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
key, keylen);
sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
/*
* SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
* it's the DIR_(IN|OUT)BOUND that matters
*/
sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
return 0;
}
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
CRYPTO_FEEDBACK_MODE_128BIT_CFB);
}
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
CRYPTO_FEEDBACK_MODE_64BIT_OFB);
}
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
int rc;
rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
if (rc)
return rc;
ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
CTR_RFC3686_NONCE_SIZE]);
return 0;
}
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE / 4] = {
ctx->iv_nonce,
cpu_to_le32p((u32 *) req->iv),
cpu_to_le32p((u32 *) (req->iv + 4)),
cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->cryptlen, iv, AES_IV_SIZE,
ctx->sa_out, ctx->sa_len, 0, NULL);
}
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE / 4] = {
ctx->iv_nonce,
cpu_to_le32p((u32 *) req->iv),
cpu_to_le32p((u32 *) (req->iv + 4)),
cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->cryptlen, iv, AES_IV_SIZE,
ctx->sa_out, ctx->sa_len, 0, NULL);
}
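/*
 * Note: the 16-byte counter block assembled above follows RFC 3686: the
 * 4-byte nonce saved from the key material at setkey time, the 8-byte
 * per-request IV, and a 4-byte block counter that starts at 1.
 */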
static int
crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
size_t iv_len = crypto_skcipher_ivsize(cipher);
unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
AES_BLOCK_SIZE;
/*
* The hardware uses only the last 32-bits as the counter while the
* kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fall back if the counter is going to
	 * overflow.
*/
if (counter + nblks < counter) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
int ret;
skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq)
: crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return ret;
}
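	/*
	 * Illustrative example (values assumed): if the last 32 bits of the
	 * IV are 0xfffffffe and the request spans 4 blocks, counter + nblks
	 * wraps to 0x00000002 < counter, so the software fallback above is
	 * taken instead of letting the hardware counter wrap mid-request.
	 */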
return encrypt ? crypto4xx_encrypt_iv_stream(req)
: crypto4xx_decrypt_iv_stream(req);
}
static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
}
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
int rc;
rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
return crypto4xx_setkey_aes(cipher, key, keylen,
CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
}
int crypto4xx_encrypt_ctr(struct skcipher_request *req)
{
return crypto4xx_ctr_crypt(req, true);
}
int crypto4xx_decrypt_ctr(struct skcipher_request *req)
{
return crypto4xx_ctr_crypt(req, false);
}
static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
unsigned int len,
bool is_ccm, bool decrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
/* authsize has to be a multiple of 4 */
if (aead->authsize & 3)
return true;
/*
* hardware does not handle cases where plaintext
* is less than a block.
*/
if (len < AES_BLOCK_SIZE)
return true;
/* assoc len needs to be a multiple of 4 and <= 1020 */
if (req->assoclen & 0x3 || req->assoclen > 1020)
return true;
/* CCM supports only counter field length of 2 and 4 bytes */
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
return false;
}
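/*
 * Note on the CCM check above (an interpretation, not spelled out in this
 * driver): the first IV byte holds L' = L - 1, where L is the size of the
 * CCM length field, so req->iv[0] == 1 || req->iv[0] == 3 corresponds to
 * the 2- and 4-byte counter fields the engine supports; anything else is
 * punted to the software fallback.
 */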
static int crypto4xx_aead_fallback(struct aead_request *req,
struct crypto4xx_ctx *ctx, bool do_decrypt)
{
struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
aead_request_set_ad(subreq, req->assoclen);
return do_decrypt ? crypto_aead_decrypt(subreq) :
crypto_aead_encrypt(subreq);
}
static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
struct crypto_aead *cipher,
const u8 *key,
unsigned int keylen)
{
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(ctx->sw_cipher.aead,
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
/*
* AES-CCM Functions
*/
int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
struct dynamic_sa_ctl *sa;
int rc = 0;
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
if (rc)
return rc;
/* Setup SA */
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
sa->sa_command_1.bf.key_len = keylen >> 3;
crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_COPY_PAD, SA_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
sa->sa_command_1.bf.key_len = keylen >> 3;
return 0;
}
static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
__le32 iv[16];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
unsigned int len = req->cryptlen;
if (decrypt)
len -= crypto_aead_authsize(aead);
if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
return crypto4xx_aead_fallback(req, ctx, decrypt);
memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
if (req->iv[0] == 1) {
/* CRYPTO_MODE_AES_ICM */
sa->sa_command_1.bf.crypto_mode9_8 = 1;
}
iv[3] = cpu_to_le32(0);
crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
sa, ctx->sa_len, req->assoclen, rctx->dst);
}
int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
{
return crypto4xx_crypt_aes_ccm(req, false);
}
int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
{
return crypto4xx_crypt_aes_ccm(req, true);
}
int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
unsigned int authsize)
{
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
}
/*
* AES-GCM Functions
*/
static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
{
switch (keylen) {
case 16:
case 24:
case 32:
return 0;
default:
return -EINVAL;
}
}
static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
unsigned int keylen)
{
struct crypto_aes_ctx ctx;
uint8_t src[16] = { 0 };
int rc;
rc = aes_expandkey(&ctx, key, keylen);
if (rc) {
pr_err("aes_expandkey() failed: %d\n", rc);
return rc;
}
aes_encrypt(&ctx, src, src);
crypto4xx_memcpy_to_le32(hash_start, src, 16);
memzero_explicit(&ctx, sizeof(ctx));
return 0;
}
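/*
 * For reference: the routine above derives the GHASH subkey H = AES_K(0^128)
 * in software by expanding the key and encrypting an all-zero block, which
 * is the hash key GCM defines; crypto4xx_setkey_aes_gcm() stores the result
 * as the SA's inner digest.
 */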
int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
struct dynamic_sa_ctl *sa;
int rc = 0;
if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
return -EINVAL;
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
if (rc)
return rc;
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
DIR_INBOUND);
set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_ON, SA_MC_DISABLE,
SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
sa->sa_command_1.bf.key_len = keylen >> 3;
crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
key, keylen);
rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
key, keylen);
if (rc) {
pr_err("GCM hash key setting failed = %d\n", rc);
goto err;
}
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
return 0;
err:
crypto4xx_free_sa(ctx);
return rc;
}
static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
__le32 iv[4];
unsigned int len = req->cryptlen;
if (decrypt)
len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
return crypto4xx_aead_fallback(req, ctx, decrypt);
crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
iv[3] = cpu_to_le32(1);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
decrypt ? ctx->sa_in : ctx->sa_out,
ctx->sa_len, req->assoclen, rctx->dst);
}
int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
{
return crypto4xx_crypt_aes_gcm(req, false);
}
int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
{
return crypto4xx_crypt_aes_gcm(req, true);
}
/*
* HASH SHA1 Functions
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
unsigned int sa_len,
unsigned char ha,
unsigned char hm)
{
struct crypto_alg *alg = tfm->__crt_alg;
struct crypto4xx_alg *my_alg;
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
struct dynamic_sa_hash160 *sa;
int rc;
my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
alg.u.hash);
ctx->dev = my_alg->dev;
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, sa_len);
if (rc)
return rc;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_HASH, DIR_INBOUND);
set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
/* Need to zero hash digest in SA */
memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
return 0;
}
int crypto4xx_hash_init(struct ahash_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
int ds;
struct dynamic_sa_ctl *sa;
sa = ctx->sa_in;
ds = crypto_ahash_digestsize(
__crypto_ahash_cast(req->base.tfm));
sa->sa_command_0.bf.digest_len = ds >> 2;
sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
return 0;
}
int crypto4xx_hash_update(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct scatterlist dst;
unsigned int ds = crypto_ahash_digestsize(ahash);
sg_init_one(&dst, req->result, ds);
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
ctx->sa_len, 0, NULL);
}
int crypto4xx_hash_final(struct ahash_request *req)
{
return 0;
}
int crypto4xx_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct scatterlist dst;
unsigned int ds = crypto_ahash_digestsize(ahash);
sg_init_one(&dst, req->result, ds);
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
ctx->sa_len, 0, NULL);
}
/*
* SHA1 Algorithm
*/
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
{
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
| linux-master | drivers/crypto/amcc/crypto4xx_alg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
* All rights reserved. James Hsiao <jhsiao@amcc.com>
*
* This file implements AMCC crypto offload Linux device driver for use with
* Linux CryptoAPI.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"
#define PPC4XX_SEC_VERSION_STR "0.5"
/*
* PPC4xx Crypto Engine Initialization Routine
*/
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
union ce_ring_size ring_size;
union ce_ring_control ring_ctrl;
union ce_part_ring_size part_ring_size;
union ce_io_threshold io_threshold;
u32 rand_num;
union ce_pe_dma_cfg pe_dma_cfg;
u32 device_ctrl;
writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* set up pe dma: reset sg, pdr and pe, then release the reset */
pe_dma_cfg.w = 0;
pe_dma_cfg.bf.bo_sgpd_en = 1;
pe_dma_cfg.bf.bo_data_en = 0;
pe_dma_cfg.bf.bo_sa_en = 1;
pe_dma_cfg.bf.bo_pd_en = 1;
pe_dma_cfg.bf.dynamic_sa_en = 1;
pe_dma_cfg.bf.reset_sg = 1;
pe_dma_cfg.bf.reset_pdr = 1;
pe_dma_cfg.bf.reset_pe = 1;
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* take pe, sg and pdr out of reset */
pe_dma_cfg.bf.pe_mode = 0;
pe_dma_cfg.bf.reset_sg = 0;
pe_dma_cfg.bf.reset_pdr = 0;
pe_dma_cfg.bf.reset_pe = 0;
pe_dma_cfg.bf.bo_td_en = 0;
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
get_random_bytes(&rand_num, sizeof(rand_num));
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
get_random_bytes(&rand_num, sizeof(rand_num));
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
ring_size.w = 0;
ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
ring_size.bf.ring_size = PPC4XX_NUM_PD;
writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
ring_ctrl.w = 0;
writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
device_ctrl |= PPC4XX_DC_3DES_EN;
writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
part_ring_size.w = 0;
part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
io_threshold.w = 0;
io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* keep pe, sg and pdr out of reset and enable pe_mode */
pe_dma_cfg.bf.pe_mode = 1;
pe_dma_cfg.bf.reset_sg = 0;
pe_dma_cfg.bf.reset_pdr = 0;
pe_dma_cfg.bf.reset_pe = 0;
pe_dma_cfg.bf.bo_td_en = 0;
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
if (dev->is_revb) {
writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
dev->ce_base + CRYPTO4XX_INT_EN);
} else {
writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
if (ctx->sa_in == NULL)
return -ENOMEM;
ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
kfree(ctx->sa_in);
ctx->sa_in = NULL;
return -ENOMEM;
}
ctx->sa_len = size;
return 0;
}
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
kfree(ctx->sa_in);
ctx->sa_in = NULL;
kfree(ctx->sa_out);
ctx->sa_out = NULL;
ctx->sa_len = 0;
}
/*
 * alloc memory for the packet descriptor ring
 * no need to alloc buf for the ring
 * the shadow SA and state record pools for each descriptor are set up here
*/
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_KERNEL);
if (!dev->pdr)
return -ENOMEM;
dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
GFP_KERNEL);
if (!dev->pdr_uinfo) {
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr,
dev->pdr_pa);
return -ENOMEM;
}
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
GFP_KERNEL);
if (!dev->shadow_sa_pool)
return -ENOMEM;
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
&dev->shadow_sr_pool_pa, GFP_KERNEL);
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
struct ce_pd *pd = &dev->pdr[i];
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
pd->sa = dev->shadow_sa_pool_pa +
sizeof(union shadow_sa_buf) * i;
/* alloc 256 bytes which is enough for any kind of dynamic sa */
pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
/* alloc state record */
pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
sizeof(struct sa_state_record) * i;
}
return 0;
}
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
if (dev->pdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
if (dev->shadow_sa_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
kfree(dev->pdr_uinfo);
}
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
u32 retval;
u32 tmp;
retval = dev->pdr_head;
tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
if (tmp == dev->pdr_tail)
return ERING_WAS_FULL;
dev->pdr_head = tmp;
return retval;
}
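/*
 * Note: the ring is treated as full when advancing the head would make it
 * equal to the tail, so one descriptor slot is always left unused to
 * distinguish the "full" state from the "empty" (head == tail) state.
 */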
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
u32 tail;
unsigned long flags;
spin_lock_irqsave(&dev->core_dev->lock, flags);
pd_uinfo->state = PD_ENTRY_FREE;
if (dev->pdr_tail != PPC4XX_LAST_PD)
dev->pdr_tail++;
else
dev->pdr_tail = 0;
tail = dev->pdr_tail;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return tail;
}
/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
&dev->gdr_pa, GFP_KERNEL);
if (!dev->gdr)
return -ENOMEM;
return 0;
}
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
if (dev->gdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
dev->gdr, dev->gdr_pa);
}
/*
 * when this function is called,
 * preemption or interrupts must be disabled
*/
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
u32 retval;
u32 tmp;
if (n >= PPC4XX_NUM_GD)
return ERING_WAS_FULL;
retval = dev->gdr_head;
tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
if (dev->gdr_head > dev->gdr_tail) {
if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
return ERING_WAS_FULL;
} else if (dev->gdr_head < dev->gdr_tail) {
if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
return ERING_WAS_FULL;
}
dev->gdr_head = tmp;
return retval;
}
static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->core_dev->lock, flags);
if (dev->gdr_tail == dev->gdr_head) {
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return 0;
}
if (dev->gdr_tail != PPC4XX_LAST_GD)
dev->gdr_tail++;
else
dev->gdr_tail = 0;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return 0;
}
static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
dma_addr_t *gd_dma, u32 idx)
{
*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
return &dev->gdr[idx];
}
/*
* alloc memory for the scatter ring
* need to alloc buf for the ring
* sdr_tail, sdr_head and sdr_count are initialized by this function
*/
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_KERNEL);
if (!dev->scatter_buffer_va)
return -ENOMEM;
/* alloc memory for scatter descriptor ring */
dev->sdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
&dev->sdr_pa, GFP_KERNEL);
if (!dev->sdr)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
}
return 0;
}
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
if (dev->sdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);
if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
dev->scatter_buffer_pa);
}
/*
 * when this function is called,
 * preemption or interrupts must be disabled
*/
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
u32 retval;
u32 tmp;
if (n >= PPC4XX_NUM_SD)
return ERING_WAS_FULL;
retval = dev->sdr_head;
tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
return ERING_WAS_FULL;
} else if (dev->sdr_head < dev->sdr_tail) {
if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
dev->sdr_head = tmp;
return retval;
}
static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->core_dev->lock, flags);
if (dev->sdr_tail == dev->sdr_head) {
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return 0;
}
if (dev->sdr_tail != PPC4XX_LAST_SD)
dev->sdr_tail++;
else
dev->sdr_tail = 0;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return 0;
}
static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
dma_addr_t *sd_dma, u32 idx)
{
*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
return &dev->sdr[idx];
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
struct ce_pd *pd,
struct pd_uinfo *pd_uinfo,
u32 nbytes,
struct scatterlist *dst)
{
unsigned int first_sd = pd_uinfo->first_sd;
unsigned int last_sd;
unsigned int overflow = 0;
unsigned int to_copy;
unsigned int dst_start = 0;
/*
* Because the scatter buffers are all neatly organized in one
	 * big contiguous ringbuffer, scatterwalk_map_and_copy() can
* be instructed to copy a range of buffers in one go.
*/
last_sd = (first_sd + pd_uinfo->num_sd);
if (last_sd > PPC4XX_LAST_SD) {
last_sd = PPC4XX_LAST_SD;
overflow = last_sd % PPC4XX_NUM_SD;
}
while (nbytes) {
void *buf = dev->scatter_buffer_va +
first_sd * PPC4XX_SD_BUFFER_SIZE;
to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
(1 + last_sd - first_sd));
scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
nbytes -= to_copy;
if (overflow) {
first_sd = 0;
last_sd = overflow;
dst_start += to_copy;
overflow = 0;
}
}
}
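/*
 * Illustrative note: because the scatter buffers sit back to back in one
 * DMA-coherent allocation, a result that starts near the end of the ring is
 * copied out in two passes: first the buffers up to the last ring entry,
 * then, after the wrap handling above resets first_sd to 0, the buffers at
 * the start of the ring.
 */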
static void crypto4xx_copy_digest_to_dst(void *dst,
struct pd_uinfo *pd_uinfo,
struct crypto4xx_ctx *ctx)
{
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
memcpy(dst, pd_uinfo->sr_va->save_digest,
SA_HASH_ALG_SHA1_DIGEST_SIZE);
}
}
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
int i;
if (pd_uinfo->num_gd) {
for (i = 0; i < pd_uinfo->num_gd; i++)
crypto4xx_put_gd_to_gdr(dev);
pd_uinfo->first_gd = 0xffffffff;
pd_uinfo->num_gd = 0;
}
if (pd_uinfo->num_sd) {
for (i = 0; i < pd_uinfo->num_sd; i++)
crypto4xx_put_sd_to_sdr(dev);
pd_uinfo->first_sd = 0xffffffff;
pd_uinfo->num_sd = 0;
}
}
static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
struct skcipher_request *req;
struct scatterlist *dst;
req = skcipher_request_cast(pd_uinfo->async_req);
if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
crypto4xx_memcpy_from_le32((u32 *)req->iv,
pd_uinfo->sr_va->save_iv,
crypto_skcipher_ivsize(skcipher));
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
skcipher_request_complete(req, -EINPROGRESS);
skcipher_request_complete(req, 0);
}
static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
struct crypto4xx_ctx *ctx;
struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
ahash_request_complete(ahash_req, -EINPROGRESS);
ahash_request_complete(ahash_req, 0);
}
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
struct aead_request *aead_req = container_of(pd_uinfo->async_req,
struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
size_t cp_len = crypto_aead_authsize(
crypto_aead_reqtfm(aead_req));
u32 icv[AES_BLOCK_SIZE];
int err = 0;
if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
dst);
} else {
dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
sizeof(icv));
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
} else {
/* check icv at the end */
scatterwalk_map_and_copy(icv, aead_req->src,
aead_req->assoclen + aead_req->cryptlen -
cp_len, cp_len, 0);
crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
if (!__ratelimit(&dev->aead_ratelimit)) {
if (pd->pd_ctl.bf.status & 2)
pr_err("pad fail error\n");
if (pd->pd_ctl.bf.status & 4)
pr_err("seqnum fail\n");
if (pd->pd_ctl.bf.status & 8)
pr_err("error _notify\n");
pr_err("aead return err status = 0x%02x\n",
pd->pd_ctl.bf.status & 0xff);
pr_err("pd pad_ctl = 0x%08x\n",
pd->pd_ctl.bf.pd_pad_ctl);
}
err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)
aead_request_complete(aead_req, -EINPROGRESS);
aead_request_complete(aead_req, err);
}
static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
struct ce_pd *pd = &dev->pdr[idx];
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
case CRYPTO_ALG_TYPE_SKCIPHER:
crypto4xx_cipher_done(dev, pd_uinfo, pd);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
case CRYPTO_ALG_TYPE_AHASH:
crypto4xx_ahash_done(dev, pd_uinfo);
break;
}
}
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
iounmap(core_dev->dev->ce_base);
kfree(core_dev->dev);
kfree(core_dev);
}
static u32 get_next_gd(u32 current)
{
if (current != PPC4XX_LAST_GD)
return current + 1;
else
return 0;
}
static u32 get_next_sd(u32 current)
{
if (current != PPC4XX_LAST_SD)
return current + 1;
else
return 0;
}
int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
const unsigned int assoclen,
struct scatterlist *_dst)
{
struct crypto4xx_device *dev = ctx->dev;
struct dynamic_sa_ctl *sa;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
u32 fst_gd = 0xffffffff;
u32 fst_sd = 0xffffffff;
u32 pd_entry;
unsigned long flags;
struct pd_uinfo *pd_uinfo;
unsigned int nbytes = datalen;
size_t offset_to_sr_ptr;
u32 gd_idx = 0;
int tmp;
bool is_busy, force_sd;
/*
	 * There's a very subtle/disguised "bug" in the hardware that
* gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
* of the hardware spec:
* *drum roll* the AES/(T)DES OFB and CFB modes are listed as
* operation modes for >>> "Block ciphers" <<<.
*
	 * To work around this issue and stop the hardware from causing
	 * "overran dst buffer" on ciphertexts that are not a multiple
* of 16 (AES_BLOCK_SIZE), we force the driver to use the
* scatter buffers.
*/
force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
&& (datalen % AES_BLOCK_SIZE);
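	/*
	 * Illustrative example (sizes assumed): a 20-byte AES-CFB request is
	 * not a multiple of AES_BLOCK_SIZE, so force_sd is true and the
	 * result is routed through the scatter buffers; a 32-byte request in
	 * the same mode goes straight to the caller's destination when dst
	 * is a single contiguous buffer.
	 */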
/* figure how many gd are needed */
tmp = sg_nents_for_len(src, assoclen + datalen);
if (tmp < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
return tmp;
}
if (tmp == 1)
tmp = 0;
num_gd = tmp;
if (assoclen) {
nbytes += assoclen;
dst = scatterwalk_ffwd(_dst, dst, assoclen);
}
/* figure how many sd are needed */
if (sg_is_last(dst) && force_sd == false) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
if (datalen % PPC4XX_SD_BUFFER_SIZE)
num_sd++;
} else {
num_sd = 1;
}
}
/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring need to be allocated consecutively.
	 * If we run out of any kind of descriptor, the descriptors
	 * already obtained must be returned to their original place.
*/
spin_lock_irqsave(&dev->core_dev->lock, flags);
/*
* Let the caller know to slow down, once more than 13/16ths = 81%
* of the available data contexts are being used simultaneously.
*
* With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
	 * 31 more contexts before new requests have to be rejected.
*/
if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
((PPC4XX_NUM_PD * 13) / 16);
} else {
/*
		 * To fix contention issues between ipsec (no backlog) and
		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
* data contexts.
*/
is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
((PPC4XX_NUM_PD * 15) / 16);
if (is_busy) {
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return -EBUSY;
}
}
if (num_gd) {
fst_gd = crypto4xx_get_n_gd(dev, num_gd);
if (fst_gd == ERING_WAS_FULL) {
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return -EAGAIN;
}
}
if (num_sd) {
fst_sd = crypto4xx_get_n_sd(dev, num_sd);
if (fst_sd == ERING_WAS_FULL) {
if (num_gd)
dev->gdr_head = fst_gd;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return -EAGAIN;
}
}
pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
if (pd_entry == ERING_WAS_FULL) {
if (num_gd)
dev->gdr_head = fst_gd;
if (num_sd)
dev->sdr_head = fst_sd;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
return -EAGAIN;
}
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
pd = &dev->pdr[pd_entry];
pd->sa_len = sa_len;
pd_uinfo = &dev->pdr_uinfo[pd_entry];
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
pd_uinfo->dest_va = dst;
pd_uinfo->async_req = req;
if (iv_len)
memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
sa = pd_uinfo->sa_va;
memcpy(sa, req_sa, sa_len * 4);
sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
if (num_gd) {
dma_addr_t gd_dma;
struct scatterlist *sg;
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
pd->src = gd_dma;
/* enable gather */
sa->sa_command_0.bf.gather = 1;
/* walk the sg, and setup gather array */
sg = src;
while (nbytes) {
size_t len;
len = min(sg->length, nbytes);
gd->ptr = dma_map_page(dev->core_dev->device,
sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
gd->ctl_len.len = len;
gd->ctl_len.done = 0;
gd->ctl_len.ready = 1;
if (len >= nbytes)
break;
nbytes -= sg->length;
gd_idx = get_next_gd(gd_idx);
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
sg = sg_next(sg);
}
} else {
pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
src->offset, min(nbytes, src->length),
DMA_TO_DEVICE);
/*
* Disable gather in sa command
*/
sa->sa_command_0.bf.gather = 0;
/*
* Indicate gather array is not used
*/
pd_uinfo->first_gd = 0xffffffff;
}
if (!num_sd) {
/*
* We know the application gave us dst as one contiguous piece
* of memory, so there is no need to use the scatter ring.
*/
pd_uinfo->first_sd = 0xffffffff;
sa->sa_command_0.bf.scatter = 0;
pd->dest = (u32)dma_map_page(dev->core_dev->device,
sg_page(dst), dst->offset,
min(datalen, dst->length),
DMA_TO_DEVICE);
} else {
dma_addr_t sd_dma;
struct ce_sd *sd = NULL;
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
pd_uinfo->first_sd = fst_sd;
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
pd->dest = sd_dma;
/* setup scatter descriptor */
sd->ctl.done = 0;
sd->ctl.rdy = 1;
/* sd->ptr should be set up by the sd_init routine */
if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
nbytes -= PPC4XX_SD_BUFFER_SIZE;
else
nbytes = 0;
while (nbytes) {
sd_idx = get_next_sd(sd_idx);
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
/* setup scatter descriptor */
sd->ctl.done = 0;
sd->ctl.rdy = 1;
if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
nbytes -= PPC4XX_SD_BUFFER_SIZE;
} else {
/*
* SD entry can hold PPC4XX_SD_BUFFER_SIZE,
* which is more than nbytes, so done.
*/
nbytes = 0;
}
}
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
(crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
wmb();
/* write any value to push engine to read a pd */
writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
return is_busy ? -EBUSY : -EINPROGRESS;
}
/*
* Algorithm Registration Functions
*/
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
struct crypto4xx_ctx *ctx)
{
ctx->dev = amcc_alg->dev;
ctx->sa_in = NULL;
ctx->sa_out = NULL;
ctx->sa_len = 0;
}
static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
struct skcipher_alg *alg = crypto_skcipher_alg(sk);
struct crypto4xx_alg *amcc_alg;
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher.cipher =
crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher.cipher))
return PTR_ERR(ctx->sw_cipher.cipher);
}
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
crypto4xx_ctx_init(amcc_alg, ctx);
return 0;
}
static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
crypto4xx_free_sa(ctx);
}
static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
crypto4xx_common_exit(ctx);
if (ctx->sw_cipher.cipher)
crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto4xx_alg *amcc_alg;
ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->sw_cipher.aead))
return PTR_ERR(ctx->sw_cipher.aead);
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
crypto4xx_ctx_init(amcc_alg, ctx);
crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
crypto_aead_reqsize(ctx->sw_cipher.aead),
sizeof(struct crypto4xx_aead_reqctx)));
return 0;
}
static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
crypto4xx_common_exit(ctx);
crypto_free_aead(ctx->sw_cipher.aead);
}
static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
struct crypto4xx_alg_common *crypto_alg,
int array_size)
{
struct crypto4xx_alg *alg;
int i;
int rc = 0;
for (i = 0; i < array_size; i++) {
alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
if (!alg)
return -ENOMEM;
alg->alg = crypto_alg[i];
alg->dev = sec_dev;
switch (alg->alg.type) {
case CRYPTO_ALG_TYPE_AEAD:
rc = crypto_register_aead(&alg->alg.u.aead);
break;
case CRYPTO_ALG_TYPE_AHASH:
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
case CRYPTO_ALG_TYPE_RNG:
rc = crypto_register_rng(&alg->alg.u.rng);
break;
default:
rc = crypto_register_skcipher(&alg->alg.u.cipher);
break;
}
if (rc)
kfree(alg);
else
list_add_tail(&alg->entry, &sec_dev->alg_list);
}
return 0;
}
static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
struct crypto4xx_alg *alg, *tmp;
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
switch (alg->alg.type) {
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&alg->alg.u.hash);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&alg->alg.u.aead);
break;
case CRYPTO_ALG_TYPE_RNG:
crypto_unregister_rng(&alg->alg.u.rng);
break;
default:
crypto_unregister_skcipher(&alg->alg.u.cipher);
}
kfree(alg);
}
}
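/*
* Bottom-half note (added for clarity): the tasklet below walks the packet
* descriptor ring from tail towards head and completes every entry the
* engine has marked PD_CTL_PE_DONE, handing each result back to the
* matching skcipher/aead/ahash completion handler via crypto4xx_pd_done().
*/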
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
struct pd_uinfo *pd_uinfo;
struct ce_pd *pd;
u32 tail = core_dev->dev->pdr_tail;
u32 head = core_dev->dev->pdr_head;
do {
pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
pd = &core_dev->dev->pdr[tail];
if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
((READ_ONCE(pd->pd_ctl.w) &
(PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
PD_CTL_PE_DONE)) {
crypto4xx_pd_done(core_dev->dev, tail);
tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
} else {
/* if tail not done, break */
break;
}
} while (head != tail);
}
/*
* Top Half of isr.
*/
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
u32 clr_val)
{
struct device *dev = data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}
static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
PPC4XX_TMO_ERR_INT);
}
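/*
* PRNG read path (added for clarity): kick off generation, poll the status
* register until the BUSY bit clears (bounded to 1024 attempts), then read
* two 32-bit result words per iteration until 'max' bytes are collected.
*/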
static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
u8 *data, unsigned int max)
{
unsigned int i, curr = 0;
u32 val[2];
do {
/* trigger PRN generation */
writel(PPC4XX_PRNG_CTRL_AUTO_EN,
dev->ce_base + CRYPTO4XX_PRNG_CTRL);
for (i = 0; i < 1024; i++) {
/* usually 19 iterations are enough */
if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
CRYPTO4XX_PRNG_STAT_BUSY))
continue;
val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
break;
}
if (i == 1024)
return -ETIMEDOUT;
if ((max - curr) >= 8) {
memcpy(data, &val, 8);
data += 8;
curr += 8;
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - curr);
break;
}
} while (curr < max);
return curr;
}
static int crypto4xx_prng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dstn, unsigned int dlen)
{
struct rng_alg *alg = crypto_rng_alg(tfm);
struct crypto4xx_alg *amcc_alg;
struct crypto4xx_device *dev;
int ret;
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
dev = amcc_alg->dev;
mutex_lock(&dev->core_dev->rng_lock);
ret = ppc4xx_prng_data_read(dev, dstn, dlen);
mutex_unlock(&dev->core_dev->rng_lock);
return ret;
}
static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
unsigned int slen)
{
return 0;
}
/*
* Supported Crypto Algorithms
*/
static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cbc,
.encrypt = crypto4xx_encrypt_iv_block,
.decrypt = crypto4xx_decrypt_iv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cfb,
.encrypt = crypto4xx_encrypt_iv_stream,
.decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ctr,
.encrypt = crypto4xx_encrypt_ctr,
.decrypt = crypto4xx_decrypt_ctr,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.setkey = crypto4xx_setkey_rfc3686,
.encrypt = crypto4xx_rfc3686_encrypt,
.decrypt = crypto4xx_rfc3686_decrypt,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = crypto4xx_setkey_aes_ecb,
.encrypt = crypto4xx_encrypt_noiv_block,
.decrypt = crypto4xx_decrypt_noiv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ofb,
.encrypt = crypto4xx_encrypt_iv_stream,
.decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
/* AEAD */
{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
.setkey = crypto4xx_setkey_aes_ccm,
.setauthsize = crypto4xx_setauthsize_aead,
.encrypt = crypto4xx_encrypt_aes_ccm,
.decrypt = crypto4xx_decrypt_aes_ccm,
.init = crypto4xx_aead_init,
.exit = crypto4xx_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
} },
{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
.setkey = crypto4xx_setkey_aes_gcm,
.setauthsize = crypto4xx_setauthsize_aead,
.encrypt = crypto4xx_encrypt_aes_gcm,
.decrypt = crypto4xx_decrypt_aes_gcm,
.init = crypto4xx_aead_init,
.exit = crypto4xx_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
} },
{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "crypto4xx_rng",
.cra_priority = 300,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
},
.generate = crypto4xx_prng_generate,
.seed = crypto4xx_prng_seed,
.seedsize = 0,
} },
};
/*
* Module Initialization Routine
*/
static int crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
struct device_node *np;
u32 pvr;
bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
return -ENODEV;
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
if (np) {
mtdcri(SDR0, PPC460EX_SDR0_SRST,
mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
mtdcri(SDR0, PPC460EX_SDR0_SRST,
mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
} else {
np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto");
if (np) {
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
is_revb = false;
} else {
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto");
if (np) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
mtdcri(SDR0, PPC460SX_SDR0_SRST,
mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
} else {
printk(KERN_ERR "Crypto Function Not supported!\n");
return -EINVAL;
}
}
}
of_node_put(np);
core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
if (!core_dev)
return -ENOMEM;
dev_set_drvdata(dev, core_dev);
core_dev->ofdev = ofdev;
core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
rc = -ENOMEM;
if (!core_dev->dev)
goto err_alloc_dev;
/*
* Older versions of the 460EX/GT have a hardware bug and
* hence do not support H/W based security interrupt coalescing.
*/
pvr = mfspr(SPRN_PVR);
if (is_revb && ((pvr >> 4) == 0x130218A)) {
u32 min = PVR_MIN(pvr);
if (min < 4) {
dev_info(dev, "RevA detected - disable interrupt coalescing\n");
is_revb = false;
}
}
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
mutex_init(&core_dev->rng_lock);
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
rc = crypto4xx_build_sdr(core_dev->dev);
if (rc)
goto err_build_sdr;
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
goto err_build_sdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
goto err_build_sdr;
/* Init tasklet for bottom half processing */
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
rc = -ENOMEM;
goto err_iomap;
}
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
rc = request_irq(core_dev->irq, is_revb ?
crypto4xx_ce_interrupt_handler_revb :
crypto4xx_ce_interrupt_handler, 0,
KBUILD_MODNAME, dev);
if (rc)
goto err_request_irq;
/* pdr, rdr, gdr and sdr need to be set up before this */
crypto4xx_hw_init(core_dev->dev);
/* Register security algorithms with Linux CryptoAPI */
rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
ARRAY_SIZE(crypto4xx_alg));
if (rc)
goto err_start_dev;
ppc4xx_trng_probe(core_dev);
return 0;
err_start_dev:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
iounmap(core_dev->dev->ce_base);
err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
return rc;
}
static int crypto4xx_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
ppc4xx_trng_remove(core_dev);
free_irq(core_dev->irq, dev);
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
return 0;
}
static const struct of_device_id crypto4xx_match[] = {
{ .compatible = "amcc,ppc4xx-crypto",},
{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
.remove = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
| linux-master | drivers/crypto/amcc/crypto4xx_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
#include "aead.h"
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
return readl(qce->base + offset);
}
static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
writel(val, qce->base + offset);
}
static inline void qce_write_array(struct qce_device *qce, u32 offset,
const u32 *val, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
qce_write(qce, offset + i * sizeof(u32), val[i]);
}
static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
qce_write(qce, offset + i * sizeof(u32), 0);
}
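/*
* Comment added for clarity: build the value programmed into REG_CONFIG,
* i.e. the request size in beats (derived from the DMA burst size), all
* engine interrupts masked, the selected crypto pipe pair and, optionally,
* little-endian data mode.
*/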
static u32 qce_config_reg(struct qce_device *qce, int little)
{
u32 beats = (qce->burst_size >> 3) - 1;
u32 pipe_pair = qce->pipe_pair_id;
u32 config;
config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
config &= ~HIGH_SPD_EN_N_SHIFT;
if (little)
config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
return config;
}
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
__be32 *d = dst;
const u8 *s = src;
unsigned int n;
n = len / sizeof(u32);
for (; n > 0; n--) {
*d = cpu_to_be32p((const __u32 *) s);
s += sizeof(__u32);
d++;
}
}
static void qce_setup_config(struct qce_device *qce)
{
u32 config;
/* get the config value for big-endian (default) data mode */
config = qce_config_reg(qce, 0);
/* clear status */
qce_write(qce, REG_STATUS, 0);
qce_write(qce, REG_CONFIG, config);
}
static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
{
if (result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
else
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
}
#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
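/*
* Comment added for clarity: translate the algorithm flags into an
* AUTH_SEG_CFG value - hash algorithm (SHA for plain/HMAC hashes, AES for
* CCM/CMAC), key size, digest size, mode and, for CCM, the nonce word count.
*/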
static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
{
u32 cfg = 0;
if (IS_CCM(flags) || IS_CMAC(flags))
cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
else
cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
if (IS_CCM(flags) || IS_CMAC(flags)) {
if (key_size == AES_KEYSIZE_128)
cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
else if (key_size == AES_KEYSIZE_256)
cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
}
if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
else if (IS_CCM(flags))
cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;
if (IS_SHA1(flags) || IS_SHA256(flags))
cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
else if (IS_CCM(flags))
cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
if (IS_SHA(flags) || IS_SHA_HMAC(flags))
cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
if (IS_CCM(flags))
cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
return cfg;
}
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
struct qce_device *qce = tmpl->qce;
unsigned int digestsize = crypto_ahash_digestsize(ahash);
unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
u32 auth_cfg = 0, config;
unsigned int iv_words;
/* if not the last, the size has to be on the block boundary */
if (!rctx->last_blk && req->nbytes % blocksize)
return -EINVAL;
qce_setup_config(qce);
if (IS_CMAC(rctx->flags)) {
qce_write(qce, REG_AUTH_SEG_CFG, 0);
qce_write(qce, REG_ENCR_SEG_CFG, 0);
qce_write(qce, REG_ENCR_SEG_SIZE, 0);
qce_clear_array(qce, REG_AUTH_IV0, 16);
qce_clear_array(qce, REG_AUTH_KEY0, 16);
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, digestsize);
}
if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
u32 authkey_words = rctx->authklen / sizeof(u32);
qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
authkey_words);
}
if (IS_CMAC(rctx->flags))
goto go_proc;
if (rctx->first_blk)
memcpy(auth, rctx->digest, digestsize);
else
qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);
if (rctx->first_blk)
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
else
qce_write_array(qce, REG_AUTH_BYTECNT0,
(u32 *)rctx->byte_count, 2);
auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);
if (rctx->last_blk)
auth_cfg |= BIT(AUTH_LAST_SHIFT);
else
auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
if (rctx->first_blk)
auth_cfg |= BIT(AUTH_FIRST_SHIFT);
else
auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
go_proc:
qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
qce_write(qce, REG_AUTH_SEG_START, 0);
qce_write(qce, REG_ENCR_SEG_CFG, 0);
qce_write(qce, REG_SEG_SIZE, req->nbytes);
/* get the config value for little-endian data mode */
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
qce_crypto_go(qce, true);
return 0;
}
#endif
#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
u32 cfg = 0;
if (IS_AES(flags)) {
if (aes_key_size == AES_KEYSIZE_128)
cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
else if (aes_key_size == AES_KEYSIZE_256)
cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
}
if (IS_AES(flags))
cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
else if (IS_DES(flags) || IS_3DES(flags))
cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
if (IS_DES(flags))
cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
if (IS_3DES(flags))
cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
switch (flags & QCE_MODE_MASK) {
case QCE_MODE_ECB:
cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
break;
case QCE_MODE_CBC:
cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
break;
case QCE_MODE_CTR:
cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
break;
case QCE_MODE_XTS:
cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
break;
case QCE_MODE_CCM:
cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
break;
default:
return ~0;
}
return cfg;
}
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
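/*
* Comment added for clarity: the engine expects the XTS tweak byte-reversed
* and right-aligned within a zeroed 16-byte buffer; qce_xts_swapiv() performs
* that swap before the IV is written to the REG_CNTR0_IV0 registers.
*/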
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
u8 swap[QCE_AES_IV_LENGTH];
u32 i, j;
if (ivsize > QCE_AES_IV_LENGTH)
return;
memset(swap, 0, QCE_AES_IV_LENGTH);
for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
i < QCE_AES_IV_LENGTH; i++, j--)
swap[i] = src[j];
qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
unsigned int enckeylen, unsigned int cryptlen)
{
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
/* Set the data unit size to cryptlen. Anything else causes the
* crypto engine to return incorrect results.
*/
qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
unsigned int enckey_words, enciv_words;
unsigned int keylen;
u32 encr_cfg = 0, auth_cfg = 0, config;
unsigned int ivsize = rctx->ivsize;
unsigned long flags = rctx->flags;
qce_setup_config(qce);
if (IS_XTS(flags))
keylen = ctx->enc_keylen / 2;
else
keylen = ctx->enc_keylen;
qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
enckey_words = keylen / sizeof(u32);
qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
encr_cfg = qce_encr_cfg(flags, keylen);
if (IS_DES(flags)) {
enciv_words = 2;
enckey_words = 2;
} else if (IS_3DES(flags)) {
enciv_words = 2;
enckey_words = 6;
} else if (IS_AES(flags)) {
if (IS_XTS(flags))
qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
rctx->cryptlen);
enciv_words = 4;
} else {
return -EINVAL;
}
qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
if (!IS_ECB(flags)) {
if (IS_XTS(flags))
qce_xts_swapiv(enciv, rctx->iv, ivsize);
else
qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
}
if (IS_ENCRYPT(flags))
encr_cfg |= BIT(ENCODE_SHIFT);
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
qce_write(qce, REG_ENCR_SEG_START, 0);
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
qce_write(qce, REG_CNTR_MASK0, ~0);
qce_write(qce, REG_CNTR_MASK1, ~0);
qce_write(qce, REG_CNTR_MASK2, ~0);
}
qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
/* get the config value for little-endian data mode */
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
qce_crypto_go(qce, true);
return 0;
}
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};
static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};
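/*
* Comment added for clarity: the inverse of qce_cpu_to_be32p_array() - unpack
* a big-endian byte stream into CPU-order words and return the number of
* 32-bit words (rounded up) for the caller to program into the engine.
*/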
static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
{
u32 *d = dst;
const u8 *s = src;
unsigned int n;
n = len / sizeof(u32);
for (; n > 0; n--) {
*d = be32_to_cpup((const __be32 *)s);
s += sizeof(u32);
d++;
}
return DIV_ROUND_UP(len, sizeof(u32));
}
static int qce_setup_regs_aead(struct crypto_async_request *async_req)
{
struct aead_request *req = aead_request_cast(async_req);
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
unsigned int enc_keylen = ctx->enc_keylen;
unsigned int auth_keylen = ctx->auth_keylen;
unsigned int enc_ivsize = rctx->ivsize;
unsigned int auth_ivsize = 0;
unsigned int enckey_words, enciv_words;
unsigned int authkey_words, authiv_words, authnonce_words;
unsigned long flags = rctx->flags;
u32 encr_cfg, auth_cfg, config, totallen;
u32 iv_last_word;
qce_setup_config(qce);
/* Write encryption key */
enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);
/* Write encryption iv */
enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv, enc_ivsize);
qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);
if (IS_CCM(rctx->flags)) {
iv_last_word = enciv[enciv_words - 1];
qce_write(qce, REG_CNTR3_IV3, iv_last_word + 1);
qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, enciv_words);
qce_write(qce, REG_CNTR_MASK, ~0);
qce_write(qce, REG_CNTR_MASK0, ~0);
qce_write(qce, REG_CNTR_MASK1, ~0);
qce_write(qce, REG_CNTR_MASK2, ~0);
}
/* Clear authentication IV and KEY registers of previous values */
qce_clear_array(qce, REG_AUTH_IV0, 16);
qce_clear_array(qce, REG_AUTH_KEY0, 16);
/* Clear byte count */
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
/* Write authentication key */
authkey_words = qce_be32_to_cpu_array(authkey, ctx->auth_key, auth_keylen);
qce_write_array(qce, REG_AUTH_KEY0, (u32 *)authkey, authkey_words);
/* Write initial authentication IV only for HMAC algorithms */
if (IS_SHA_HMAC(rctx->flags)) {
/* Write default authentication iv */
if (IS_SHA1_HMAC(rctx->flags)) {
auth_ivsize = SHA1_DIGEST_SIZE;
memcpy(authiv, std_iv_sha1, auth_ivsize);
} else if (IS_SHA256_HMAC(rctx->flags)) {
auth_ivsize = SHA256_DIGEST_SIZE;
memcpy(authiv, std_iv_sha256, auth_ivsize);
}
authiv_words = auth_ivsize / sizeof(u32);
qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
} else if (IS_CCM(rctx->flags)) {
/* Write nonce for CCM algorithms */
authnonce_words = qce_be32_to_cpu_array(authnonce, rctx->ccm_nonce, QCE_MAX_NONCE);
qce_write_array(qce, REG_AUTH_INFO_NONCE0, authnonce, authnonce_words);
}
/* Set up ENCR_SEG_CFG */
encr_cfg = qce_encr_cfg(flags, enc_keylen);
if (IS_ENCRYPT(flags))
encr_cfg |= BIT(ENCODE_SHIFT);
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
/* Set up AUTH_SEG_CFG */
auth_cfg = qce_auth_cfg(rctx->flags, auth_keylen, ctx->authsize);
auth_cfg |= BIT(AUTH_LAST_SHIFT);
auth_cfg |= BIT(AUTH_FIRST_SHIFT);
if (IS_ENCRYPT(flags)) {
if (IS_CCM(rctx->flags))
auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
else
auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
} else {
if (IS_CCM(rctx->flags))
auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
else
auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
}
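/*
* Note added for clarity: for CCM the CBC-MAC is computed over the plaintext,
* so authentication runs before encryption and after decryption; for the
* authenc() templates the HMAC covers the ciphertext, hence the opposite
* ordering.
*/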
qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
totallen = rctx->cryptlen + rctx->assoclen;
/* Set the encryption size and start offset */
if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen + ctx->authsize);
else
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
qce_write(qce, REG_ENCR_SEG_START, rctx->assoclen & 0xffff);
/* Set the authentication size and start offset */
qce_write(qce, REG_AUTH_SEG_SIZE, totallen);
qce_write(qce, REG_AUTH_SEG_START, 0);
/* Write total length */
if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
qce_write(qce, REG_SEG_SIZE, totallen + ctx->authsize);
else
qce_write(qce, REG_SEG_SIZE, totallen);
/* get the config value for little-endian data mode */
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
/* Start the process */
qce_crypto_go(qce, !IS_CCM(flags));
return 0;
}
#endif
int qce_start(struct crypto_async_request *async_req, u32 type)
{
switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
case CRYPTO_ALG_TYPE_AEAD:
return qce_setup_regs_aead(async_req);
#endif
default:
return -EINVAL;
}
}
#define STATUS_ERRORS \
(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
int qce_check_status(struct qce_device *qce, u32 *status)
{
int ret = 0;
*status = qce_read(qce, REG_STATUS);
/*
* Don't use the result dump status; the operation may not be complete.
* Instead, use the status we just read from the device. If we ever need
* to use result_status from the result dump, it has to be byte swapped,
* since we set the device to little endian.
*/
if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
ret = -ENXIO;
else if (*status & BIT(MAC_FAILED_SHIFT))
ret = -EBADMSG;
return ret;
}
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
u32 val;
val = qce_read(qce, REG_VERSION);
*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}
| linux-master | drivers/crypto/qce/common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021, Linaro Limited. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include "aead.h"
#define CCM_NONCE_ADATA_SHIFT 6
#define CCM_NONCE_AUTHSIZE_SHIFT 3
#define MAX_CCM_ADATA_HEADER_LEN 6
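/*
* Comment added for clarity: in the CCM B_0 flags octet (RFC 3610), bit 6
* signals the presence of associated data and bits 5..3 encode
* (authsize - 2) / 2; up to MAX_CCM_ADATA_HEADER_LEN bytes of the adata
* buffer are reserved for the AAD length header.
*/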
static LIST_HEAD(aead_algs);
static void qce_aead_done(void *data)
{
struct crypto_async_request *async_req = data;
struct aead_request *req = aead_request_cast(async_req);
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
bool diff_dst;
int error;
u32 status;
unsigned int totallen;
unsigned char tag[SHA256_DIGEST_SIZE] = {0};
int ret = 0;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
error = qce_dma_terminate_all(&qce->dma);
if (error)
dev_dbg(qce->dev, "aead dma termination error (%d)\n",
error);
if (diff_dst)
dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
if (IS_CCM(rctx->flags)) {
if (req->assoclen) {
sg_free_table(&rctx->src_tbl);
if (diff_dst)
sg_free_table(&rctx->dst_tbl);
} else {
if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
sg_free_table(&rctx->dst_tbl);
}
} else {
sg_free_table(&rctx->dst_tbl);
}
error = qce_check_status(qce, &status);
if (error < 0 && (error != -EBADMSG))
dev_err(qce->dev, "aead operation error (%x)\n", status);
if (IS_ENCRYPT(rctx->flags)) {
totallen = req->cryptlen + req->assoclen;
if (IS_CCM(rctx->flags))
scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
totallen, ctx->authsize, 1);
else
scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
totallen, ctx->authsize, 1);
} else if (!IS_CCM(rctx->flags)) {
totallen = req->cryptlen + req->assoclen - ctx->authsize;
scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
if (ret) {
pr_err("Bad message error\n");
error = -EBADMSG;
}
}
qce->async_req_done(qce, error);
}
static struct scatterlist *
qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
{
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
}
static struct scatterlist *
qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
{
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
}
static struct scatterlist *
qce_aead_prepare_dst_buf(struct aead_request *req)
{
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
struct scatterlist *sg, *msg_sg, __sg[2];
gfp_t gfp;
unsigned int assoclen = req->assoclen;
unsigned int totallen;
int ret;
totallen = rctx->cryptlen + assoclen;
rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
if (rctx->dst_nents < 0) {
dev_err(qce->dev, "Invalid numbers of dst SG.\n");
return ERR_PTR(-EINVAL);
}
if (IS_CCM(rctx->flags))
rctx->dst_nents += 2;
else
rctx->dst_nents += 1;
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
if (ret)
return ERR_PTR(ret);
if (IS_CCM(rctx->flags) && assoclen) {
/* Get the dst buffer */
msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);
sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
rctx->assoclen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto dst_tbl_free;
}
/* dst buffer */
sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto dst_tbl_free;
}
totallen = rctx->cryptlen + rctx->assoclen;
} else {
if (totallen) {
sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
if (IS_ERR(sg))
goto dst_tbl_free;
}
}
if (IS_CCM(rctx->flags))
sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
else
sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);
if (IS_ERR(sg))
goto dst_tbl_free;
sg_mark_end(sg);
rctx->dst_sg = rctx->dst_tbl.sgl;
rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;
return sg;
dst_tbl_free:
sg_free_table(&rctx->dst_tbl);
return sg;
}
static int
qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
{
struct scatterlist *sg, *msg_sg, __sg[2];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int assoclen = rctx->assoclen;
unsigned int adata_header_len, cryptlen, totallen;
gfp_t gfp;
bool diff_dst;
int ret;
if (IS_DECRYPT(rctx->flags))
cryptlen = rctx->cryptlen + ctx->authsize;
else
cryptlen = rctx->cryptlen;
totallen = cryptlen + req->assoclen;
/* Get the msg */
msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);
rctx->adata = kzalloc((ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN) *
sizeof(unsigned char), GFP_ATOMIC);
if (!rctx->adata)
return -ENOMEM;
/*
* Format the associated data (RFC 3610 and NIST SP 800-38C).
* Even though the specification allows the AAD to be up to 2^64 - 1 bytes,
* the assoclen field in aead_request is an unsigned int and thus limits
* the AAD to at most 2^32 - 1 bytes. So only two of the length-header
* encodings need to be handled here.
*/
if (assoclen < 0xff00) {
adata_header_len = 2;
*(__be16 *)rctx->adata = cpu_to_be16(assoclen);
} else {
adata_header_len = 6;
*(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
*(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
}
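/*
* Worked example (illustrative): assoclen = 24 (< 0xff00) yields the
* two-byte big-endian header { 0x00, 0x18 } followed by the 24 AAD bytes;
* the whole blob is then zero-padded up to a 16-byte multiple below.
*/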
/* Copy the associated data */
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
rctx->adata + adata_header_len,
assoclen) != assoclen)
return -EINVAL;
/* Pad associated data to block size */
rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);
diff_dst = (req->src != req->dst) ? true : false;
if (diff_dst)
rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
else
rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
if (ret)
return ret;
/* Associated Data */
sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
rctx->assoclen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto err_free;
}
/* src msg */
sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto err_free;
}
if (!diff_dst) {
/*
* For decrypt, when src and dst buffers are same, there is already space
* in the buffer for padded 0's which is output in lieu of
* the MAC that is input. So skip the below.
*/
if (!IS_DECRYPT(rctx->flags)) {
sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto err_free;
}
}
}
sg_mark_end(sg);
rctx->src_sg = rctx->src_tbl.sgl;
totallen = cryptlen + rctx->assoclen;
rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);
if (diff_dst) {
sg = qce_aead_prepare_dst_buf(req);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto err_free;
}
} else {
if (IS_ENCRYPT(rctx->flags))
rctx->dst_nents = rctx->src_nents + 1;
else
rctx->dst_nents = rctx->src_nents;
rctx->dst_sg = rctx->src_sg;
}
return 0;
err_free:
sg_free_table(&rctx->src_tbl);
return ret;
}
static int qce_aead_prepare_buf(struct aead_request *req)
{
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
struct scatterlist *sg;
bool diff_dst = (req->src != req->dst) ? true : false;
unsigned int totallen;
totallen = rctx->cryptlen + rctx->assoclen;
sg = qce_aead_prepare_dst_buf(req);
if (IS_ERR(sg))
return PTR_ERR(sg);
if (diff_dst) {
rctx->src_nents = sg_nents_for_len(req->src, totallen);
if (rctx->src_nents < 0) {
dev_err(qce->dev, "Invalid numbers of src SG.\n");
return -EINVAL;
}
rctx->src_sg = req->src;
} else {
rctx->src_nents = rctx->dst_nents - 1;
rctx->src_sg = rctx->dst_sg;
}
return 0;
}
static int qce_aead_ccm_prepare_buf(struct aead_request *req)
{
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct scatterlist *sg;
bool diff_dst = (req->src != req->dst) ? true : false;
unsigned int cryptlen;
if (rctx->assoclen)
return qce_aead_ccm_prepare_buf_assoclen(req);
if (IS_ENCRYPT(rctx->flags))
return qce_aead_prepare_buf(req);
cryptlen = rctx->cryptlen + ctx->authsize;
if (diff_dst) {
rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
rctx->src_sg = req->src;
sg = qce_aead_prepare_dst_buf(req);
if (IS_ERR(sg))
return PTR_ERR(sg);
} else {
rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
rctx->src_sg = req->src;
rctx->dst_nents = rctx->src_nents;
rctx->dst_sg = rctx->src_sg;
}
return 0;
}
static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
{
unsigned int msglen_size, ivsize;
u8 msg_len[4];
int i;
if (!rctx || !rctx->iv)
return -EINVAL;
msglen_size = rctx->iv[0] + 1;
/* Verify that msg len size is valid */
if (msglen_size < 2 || msglen_size > 8)
return -EINVAL;
ivsize = rctx->ivsize;
/*
* Clear the msglen bytes in the IV, otherwise the h/w engine and the
* nonce will pick up whatever stray value is pending there.
*/
if (!IS_CCM_RFC4309(rctx->flags)) {
for (i = 0; i < msglen_size; i++)
rctx->iv[ivsize - i - 1] = 0;
}
/*
* The crypto framework encodes cryptlen as an unsigned int. Thus, even
* though the spec allows up to 8 bytes to encode msg_len, only 4 bytes
* are needed.
*/
if (msglen_size > 4)
msglen_size = 4;
memcpy(&msg_len[0], &rctx->cryptlen, 4);
memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
if (rctx->assoclen)
rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
CCM_NONCE_AUTHSIZE_SHIFT;
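/*
* Worked example (illustrative): for a ccm(aes) request with iv[0] = 3
* (a 4-byte length field), a non-zero assoclen and a 16-byte tag, the
* flags byte becomes 0x03 | (1 << 6) | (((16 - 2) / 2) << 3) = 0x7b,
* matching the B_0 flags octet defined by RFC 3610.
*/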
for (i = 0; i < msglen_size; i++)
rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];
return 0;
}
static int
qce_aead_async_req_handle(struct crypto_async_request *async_req)
{
struct aead_request *req = aead_request_cast(async_req);
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
bool diff_dst;
int dst_nents, src_nents, ret;
if (IS_CCM_RFC4309(rctx->flags)) {
memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
rctx->ccm_rfc4309_iv[0] = 3;
memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
rctx->iv = rctx->ccm_rfc4309_iv;
rctx->ivsize = AES_BLOCK_SIZE;
} else {
rctx->iv = req->iv;
rctx->ivsize = crypto_aead_ivsize(tfm);
}
if (IS_CCM_RFC4309(rctx->flags))
rctx->assoclen = req->assoclen - 8;
else
rctx->assoclen = req->assoclen;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
if (IS_CCM(rctx->flags)) {
ret = qce_aead_create_ccm_nonce(rctx, ctx);
if (ret)
return ret;
}
if (IS_CCM(rctx->flags))
ret = qce_aead_ccm_prepare_buf(req);
else
ret = qce_aead_prepare_buf(req);
if (ret)
return ret;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
if (!dst_nents) {
ret = -EIO;
goto error_free;
}
if (diff_dst) {
src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
if (src_nents < 0) {
ret = src_nents;
goto error_unmap_dst;
}
} else {
if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
src_nents = dst_nents;
else
src_nents = dst_nents - 1;
}
ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
qce_aead_done, async_req);
if (ret)
goto error_unmap_src;
qce_dma_issue_pending(&qce->dma);
ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
return 0;
error_terminate:
qce_dma_terminate_all(&qce->dma);
error_unmap_src:
if (diff_dst)
dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
if (IS_CCM(rctx->flags) && rctx->assoclen) {
sg_free_table(&rctx->src_tbl);
if (diff_dst)
sg_free_table(&rctx->dst_tbl);
} else {
sg_free_table(&rctx->dst_tbl);
}
return ret;
}
static int qce_aead_crypt(struct aead_request *req, int encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
unsigned int blocksize = crypto_aead_blocksize(tfm);
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
if (encrypt)
rctx->cryptlen = req->cryptlen;
else
rctx->cryptlen = req->cryptlen - ctx->authsize;
/* CE does not handle 0 length messages */
if (!rctx->cryptlen) {
if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
ctx->need_fallback = true;
}
/* If fallback is needed, schedule and exit */
if (ctx->need_fallback) {
/* Reset need_fallback in case the same ctx is used for another transaction */
ctx->need_fallback = false;
aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
aead_request_set_callback(&rctx->fallback_req, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
aead_request_set_ad(&rctx->fallback_req, req->assoclen);
return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
crypto_aead_decrypt(&rctx->fallback_req);
}
/*
* CBC algorithms require message lengths to be
* multiples of block size.
*/
if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
return -EINVAL;
/* RFC 4309-style requests support AAD sizes of 16 or 20 bytes */
if (IS_CCM_RFC4309(rctx->flags))
if (crypto_ipsec_check_assoclen(req->assoclen))
return -EINVAL;
return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
static int qce_aead_encrypt(struct aead_request *req)
{
return qce_aead_crypt(req, 1);
}
static int qce_aead_decrypt(struct aead_request *req)
{
return qce_aead_crypt(req, 0);
}
static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
if (IS_CCM_RFC4309(flags)) {
if (keylen < QCE_CCM4309_SALT_SIZE)
return -EINVAL;
keylen -= QCE_CCM4309_SALT_SIZE;
memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
}
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
return -EINVAL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
memcpy(ctx->auth_key, key, keylen);
if (keylen == AES_KEYSIZE_192)
ctx->need_fallback = true;
return IS_CCM_RFC4309(flags) ?
crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
crypto_aead_setkey(ctx->fallback, key, keylen);
}
static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys authenc_keys;
unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
u32 _key[6];
int err;
err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
if (err)
return err;
if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
return -EINVAL;
if (IS_DES(flags)) {
err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
if (err)
return err;
} else if (IS_3DES(flags)) {
err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
if (err)
return err;
/*
* The crypto engine does not support any two of the three keys
* being the same for triple DES algorithms. The
* verify_aead_des3_key() call above does not check for all of
* the conditions below, so schedule a fallback in that case.
*/
memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
!((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
!((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
ctx->need_fallback = true;
} else if (IS_AES(flags)) {
/* No random key sizes */
if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
authenc_keys.enckeylen != AES_KEYSIZE_192 &&
authenc_keys.enckeylen != AES_KEYSIZE_256)
return -EINVAL;
if (authenc_keys.enckeylen == AES_KEYSIZE_192)
ctx->need_fallback = true;
}
ctx->enc_keylen = authenc_keys.enckeylen;
ctx->auth_keylen = authenc_keys.authkeylen;
memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);
memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
return crypto_aead_setkey(ctx->fallback, key, keylen);
}
static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
if (IS_CCM(flags)) {
if (authsize < 4 || authsize > 16 || authsize % 2)
return -EINVAL;
if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
return -EINVAL;
}
ctx->authsize = authsize;
return crypto_aead_setauthsize(ctx->fallback, authsize);
}
static int qce_aead_init(struct crypto_aead *tfm)
{
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
ctx->need_fallback = false;
ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
crypto_aead_set_reqsize_dma(tfm, sizeof(struct qce_aead_reqctx) +
crypto_aead_reqsize(ctx->fallback));
return 0;
}
static void qce_aead_exit(struct crypto_aead *tfm)
{
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->fallback);
}
struct qce_aead_def {
unsigned long flags;
const char *name;
const char *drv_name;
unsigned int blocksize;
unsigned int chunksize;
unsigned int ivsize;
unsigned int maxauthsize;
};
static const struct qce_aead_def aead_def[] = {
{
.flags = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
.name = "authenc(hmac(sha1),cbc(des))",
.drv_name = "authenc-hmac-sha1-cbc-des-qce",
.blocksize = DES_BLOCK_SIZE,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
{
.flags = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.drv_name = "authenc-hmac-sha1-cbc-3des-qce",
.blocksize = DES3_EDE_BLOCK_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
{
.flags = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
.name = "authenc(hmac(sha256),cbc(des))",
.drv_name = "authenc-hmac-sha256-cbc-des-qce",
.blocksize = DES_BLOCK_SIZE,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
{
.flags = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.drv_name = "authenc-hmac-sha256-cbc-3des-qce",
.blocksize = DES3_EDE_BLOCK_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
{
.flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
.name = "authenc(hmac(sha256),cbc(aes))",
.drv_name = "authenc-hmac-sha256-cbc-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
{
.flags = QCE_ALG_AES | QCE_MODE_CCM,
.name = "ccm(aes)",
.drv_name = "ccm-aes-qce",
.blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
{
.flags = QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
.name = "rfc4309(ccm(aes))",
.drv_name = "rfc4309-ccm-aes-qce",
.blocksize = 1,
.ivsize = 8,
.maxauthsize = AES_BLOCK_SIZE,
},
};
static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
{
struct qce_alg_template *tmpl;
struct aead_alg *alg;
int ret;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl)
return -ENOMEM;
alg = &tmpl->alg.aead;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->maxauthsize = def->maxauthsize;
if (IS_CCM(def->flags))
alg->setkey = qce_aead_ccm_setkey;
else
alg->setkey = qce_aead_setkey;
alg->setauthsize = qce_aead_setauthsize;
alg->encrypt = qce_aead_encrypt;
alg->decrypt = qce_aead_decrypt;
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
alg->base.cra_ctxsize = sizeof(struct qce_aead_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
tmpl->alg_flags = def->flags;
tmpl->qce = qce;
ret = crypto_register_aead(alg);
if (ret) {
dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
kfree(tmpl);
return ret;
}
list_add_tail(&tmpl->entry, &aead_algs);
dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
return 0;
}
static void qce_aead_unregister(struct qce_device *qce)
{
struct qce_alg_template *tmpl, *n;
list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
crypto_unregister_aead(&tmpl->alg.aead);
list_del(&tmpl->entry);
kfree(tmpl);
}
}
static int qce_aead_register(struct qce_device *qce)
{
int ret, i;
for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
ret = qce_aead_register_one(&aead_def[i], qce);
if (ret)
goto err;
}
return 0;
err:
qce_aead_unregister(qce);
return ret;
}
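/*
 * Ops bundle referenced from qce_ops[] in core.c when
 * CONFIG_CRYPTO_DEV_QCE_AEAD is enabled.
 */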
const struct qce_algo_ops aead_ops = {
.type = CRYPTO_ALG_TYPE_AEAD,
.register_algs = qce_aead_register,
.unregister_algs = qce_aead_unregister,
.async_req_handle = qce_aead_async_req_handle,
};
| linux-master | drivers/crypto/qce/aead.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"
#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
#define QCE_DEFAULT_MEM_BANDWIDTH 393600
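/*
 * Algorithm families built into the driver; each entry supplies the
 * register/unregister hooks and the async request handler used by
 * qce_handle_request().
 */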
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
&aead_ops,
#endif
};
static void qce_unregister_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ops->unregister_algs(qce);
}
}
static int qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
int i, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
if (ret)
break;
}
return ret;
}
static int qce_handle_request(struct crypto_async_request *async_req)
{
int ret = -EINVAL, i;
const struct qce_algo_ops *ops;
u32 type = crypto_tfm_alg_type(async_req->tfm);
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
if (type != ops->type)
continue;
ret = ops->async_req_handle(async_req);
break;
}
return ret;
}
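/*
 * Enqueue an optional new request and, if no request is currently being
 * processed, dequeue the next one: a backlogged request is notified with
 * -EINPROGRESS and the dequeued request is handed to the matching
 * qce_algo_ops handler. Handler errors are reported via the done tasklet.
 */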
static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
unsigned long flags;
int ret = 0, err;
spin_lock_irqsave(&qce->lock, flags);
if (req)
ret = crypto_enqueue_request(&qce->queue, req);
/* busy, do not dequeue request */
if (qce->req) {
spin_unlock_irqrestore(&qce->lock, flags);
return ret;
}
backlog = crypto_get_backlog(&qce->queue);
async_req = crypto_dequeue_request(&qce->queue);
if (async_req)
qce->req = async_req;
spin_unlock_irqrestore(&qce->lock, flags);
if (!async_req)
return ret;
if (backlog) {
spin_lock_bh(&qce->lock);
crypto_request_complete(backlog, -EINPROGRESS);
spin_unlock_bh(&qce->lock);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
tasklet_schedule(&qce->done_tasklet);
}
return ret;
}
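/* Completion tasklet: finish the in-flight request and restart the queue. */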
static void qce_tasklet_req_done(unsigned long data)
{
struct qce_device *qce = (struct qce_device *)data;
struct crypto_async_request *req;
unsigned long flags;
spin_lock_irqsave(&qce->lock, flags);
req = qce->req;
qce->req = NULL;
spin_unlock_irqrestore(&qce->lock, flags);
if (req)
crypto_request_complete(req, qce->result);
qce_handle_queue(qce, NULL);
}
static int qce_async_request_enqueue(struct qce_device *qce,
struct crypto_async_request *req)
{
return qce_handle_queue(qce, req);
}
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
tasklet_schedule(&qce->done_tasklet);
}
static int qce_check_version(struct qce_device *qce)
{
u32 major, minor, step;
qce_get_version(qce, &major, &minor, &step);
/*
* the driver does not support v5 with minor 0 because it has special
* alignment requirements.
*/
if (major != QCE_MAJOR_VERSION5 || minor == 0)
return -ENODEV;
qce->burst_size = QCE_BAM_BURST_SIZE;
/*
* Rx and tx pipes are treated as a pair inside CE.
* Pipe pair number depends on the actual BAM dma pipe
* that is used for transfers. The BAM dma pipes are passed
* from the device tree and used to derive the pipe pair
* id in the CE driver as follows.
* BAM dma pipes(rx, tx) CE pipe pair id
* 0,1 0
* 2,3 1
* 4,5 2
* 6,7 3
* ...
*/
qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;
dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
major, minor, step);
return 0;
}
static int qce_crypto_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qce_device *qce;
int ret;
qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
if (!qce)
return -ENOMEM;
qce->dev = dev;
platform_set_drvdata(pdev, qce);
qce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qce->base))
return PTR_ERR(qce->base);
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;
qce->core = devm_clk_get_optional(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
qce->iface = devm_clk_get_optional(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
qce->bus = devm_clk_get_optional(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
qce->mem_path = devm_of_icc_get(qce->dev, "memory");
if (IS_ERR(qce->mem_path))
return PTR_ERR(qce->mem_path);
ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
if (ret)
return ret;
ret = clk_prepare_enable(qce->core);
if (ret)
goto err_mem_path_disable;
ret = clk_prepare_enable(qce->iface);
if (ret)
goto err_clks_core;
ret = clk_prepare_enable(qce->bus);
if (ret)
goto err_clks_iface;
ret = qce_dma_request(qce->dev, &qce->dma);
if (ret)
goto err_clks;
	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;
spin_lock_init(&qce->lock);
tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
(unsigned long)qce);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
ret = qce_register_algs(qce);
if (ret)
goto err_dma;
return 0;
err_dma:
qce_dma_release(&qce->dma);
err_clks:
clk_disable_unprepare(qce->bus);
err_clks_iface:
clk_disable_unprepare(qce->iface);
err_clks_core:
clk_disable_unprepare(qce->core);
err_mem_path_disable:
icc_set_bw(qce->mem_path, 0, 0);
return ret;
}
static int qce_crypto_remove(struct platform_device *pdev)
{
struct qce_device *qce = platform_get_drvdata(pdev);
tasklet_kill(&qce->done_tasklet);
qce_unregister_algs(qce);
qce_dma_release(&qce->dma);
clk_disable_unprepare(qce->bus);
clk_disable_unprepare(qce->iface);
clk_disable_unprepare(qce->core);
return 0;
}
static const struct of_device_id qce_crypto_of_match[] = {
{ .compatible = "qcom,crypto-v5.1", },
{ .compatible = "qcom,crypto-v5.4", },
{ .compatible = "qcom,qce", },
{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
.remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
},
};
module_platform_driver(qce_crypto_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
| linux-master | drivers/crypto/qce/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
return PTR_ERR(dma->txchan);
dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
ret = PTR_ERR(dma->rxchan);
goto error_rx;
}
dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
GFP_KERNEL);
if (!dma->result_buf) {
ret = -ENOMEM;
goto error_nomem;
}
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
return 0;
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
dma_release_channel(dma->txchan);
return ret;
}
void qce_dma_release(struct qce_dma_data *dma)
{
dma_release_channel(dma->txchan);
dma_release_channel(dma->rxchan);
kfree(dma->result_buf);
}
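/*
 * Append the entries of @new_sgl to the first unused slots of the
 * preallocated @sgt, copying at most @max_len bytes in total. Returns the
 * last entry written, or ERR_PTR(-EINVAL) if the table has no free slot.
 */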
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
unsigned int new_len;
while (sg) {
if (!sg_page(sg))
break;
sg = sg_next(sg);
}
if (!sg)
return ERR_PTR(-EINVAL);
while (new_sgl && sg && max_len) {
new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
max_len -= new_len;
}
return sg_last;
}
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
int nents, unsigned long flags,
enum dma_transfer_direction dir,
dma_async_tx_callback cb, void *cb_param)
{
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
if (!sg || !nents)
return -EINVAL;
desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
if (!desc)
return -EINVAL;
desc->callback = cb;
desc->callback_param = cb_param;
cookie = dmaengine_submit(desc);
return dma_submit_error(cookie);
}
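/*
 * Prepare and submit slave-sg descriptors on both BAM channels: rx_sg on the
 * rx channel (DMA_MEM_TO_DEV) and tx_sg on the tx channel (DMA_DEV_TO_MEM).
 * The completion callback is attached to the tx descriptor only.
 */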
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
int rx_nents, struct scatterlist *tx_sg, int tx_nents,
dma_async_tx_callback cb, void *cb_param)
{
struct dma_chan *rxchan = dma->rxchan;
struct dma_chan *txchan = dma->txchan;
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
int ret;
ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
NULL, NULL);
if (ret)
return ret;
return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
cb, cb_param);
}
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
dma_async_issue_pending(dma->rxchan);
dma_async_issue_pending(dma->txchan);
}
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
int ret;
ret = dmaengine_terminate_all(dma->rxchan);
return ret ?: dmaengine_terminate_all(dma->txchan);
}
| linux-master | drivers/crypto/qce/dma.c |