/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>

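/*
 * Enable the clocks the crypto block needs (sclk, aclk, hclk and the DMA
 * clock).  On failure every clock enabled so far is turned off again;
 * rk_crypto_disable_clk() below releases all four in reverse order.
 */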
static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;

err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

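/*
 * Check whether the scatterlist entries can be fed to the DMA engine as
 * they are: offsets must be 32-bit aligned, lengths a multiple of
 * align_mask, and, when a destination is given, source and destination
 * lengths must match.
 */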
static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

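/*
 * Map the next chunk of a request for DMA.  Aligned scatterlist entries
 * are mapped directly; otherwise up to one page is copied into the
 * driver's bounce buffer (addr_vir) and the temporary scatterlist sg_tmp
 * is mapped instead.
 */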
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	if (dev->aligned)
		dev->aligned = check_alignment(sg_src, sg_dst,
					       dev->align_size);
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

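/* Undo the DMA mappings set up by rk_load_data() for the finished chunk. */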
static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

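/*
 * Interrupt handler: acknowledge the pending status bits and either flag a
 * DMA error or let the per-algorithm update() callback program the next
 * chunk.  The 0x0a mask matches the DMA error status bits, 0x05 the DMA
 * done bits.
 */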
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	int err = 0;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		err = -EFAULT;
	} else if (interrupt_status & 0x05) {
		err = dev->update(dev);
	}
	if (err)
		dev->complete(dev, err);
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

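/*
 * Tasklet callback: pull the next request off the crypto queue, notify a
 * backlogged request if there is one, and hand the new request to the
 * per-algorithm start() callback.
 */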
static void rk_crypto_tasklet_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!async_req) {
		dev_err(dev->dev, "async_req is NULL !!\n");
		return;
	}
	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
		dev->ablk_req = ablkcipher_request_cast(async_req);
	else
		dev->ahash_req = ahash_request_cast(async_req);
	err = dev->start(dev);
	if (err)
		dev->complete(dev, err);
}

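/* Algorithms exposed by this driver; ->dev is filled in at registration. */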
static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

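/*
 * Register every algorithm in rk_cipher_algs with the crypto API.  On
 * failure, roll back the registrations done so far before returning.
 */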
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

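/* Unregister everything that rk_crypto_register() registered. */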
static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

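/*
 * devm action installed in probe: put the crypto block back into reset
 * when the device goes away.
 */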
static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

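/*
 * For reference, a sketch of the devicetree node this driver expects,
 * inferred from the resources requested in rk_crypto_probe() (one register
 * block, one interrupt, the four named clocks and the "crypto-rst" reset).
 * Addresses, interrupt numbers and clock/reset phandles are placeholders
 * only, not taken from a real board file:
 *
 *	crypto: crypto@ff8a0000 {
 *		compatible = "rockchip,rk3288-crypto";
 *		reg = <0xff8a0000 0x4000>;
 *		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
 *			 <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
 *		clock-names = "aclk", "hclk", "sclk", "apb_pclk";
 *		resets = <&cru SRST_CRYPTO>;
 *		reset-names = "crypto-rst";
 *	};
 */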
static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

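/*
 * Probe: bring the block out of reset, map its registers, look up clocks
 * and the interrupt, set up the request queue and tasklet, and register
 * the supported algorithms.
 */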
static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(dev, "control Interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);

	if (err) {
		dev_err(dev, "irq request failed.\n");
		goto err_crypto;
	}

	crypto_info->dev = &pdev->dev;
	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->crypto_tasklet,
		     rk_crypto_tasklet_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "failed to register crypto algorithms\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->crypto_tasklet);
err_crypto:
	return err;
}

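/* Remove: drop the registered algorithms and stop the tasklet. */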
static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->crypto_tasklet);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");