/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;
	struct exynos_drm_gem_obj	*exynos_gem_obj;
};

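/*
 * fb_mmap handler: map the fbdev GEM buffer into userspace with
 * dma_mmap_attrs() after checking that the requested mapping does not
 * exceed the buffer size.
 */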
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

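/*
 * fbdev file operations: a driver specific mmap plus the generic cfb_*
 * drawing helpers and the drm_fb_helper wrappers.
 */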
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap	= exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

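/*
 * Fill the fb_info from the created framebuffer: map the backing GEM
 * buffer into kernel space (vmap() when an IOMMU is used, phys_to_virt()
 * otherwise) and set up the screen base, size and smem fields.
 */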
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_LOG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map pages into kernel virtual address space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			phys_addr_t dma_addr = buffer->dma_addr;
			if (dma_addr)
				buffer->kvaddr = phys_to_virt(dma_addr);
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* the buffer count of the framebuffer is always 1 at boot time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}

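/*
 * Allocate a physically contiguous GEM buffer sized for the requested
 * surface, wrap it in a drm framebuffer and set up an fb_info that
 * exposes it through the fbdev interface.
 */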
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct platform_device *pdev = dev->platformdev;
	unsigned long size;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

	size = mode_cmd.pitches[0] * mode_cmd.height;

	/* a flag of 0 means to allocate physically contiguous memory */
	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		goto err_release_framebuffer;
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
	if (IS_ERR_OR_NULL(helper->fb)) {
		DRM_ERROR("failed to create drm framebuffer.\n");
		ret = PTR_ERR(helper->fb);
		goto err_destroy_gem;
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
		goto err_destroy_framebuffer;
	}

	ret = exynos_drm_fbdev_update(helper, helper->fb);
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);

/*
 * If this function fails, all resources allocated above are released by
 * drm_mode_config_cleanup(), since drm_load() has already been called
 * before any specific driver such as the fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

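/*
 * fb_probe callback: returns 1 after creating a new fbdev so that the
 * core registers the framebuffer, and 0 when an existing helper->fb is
 * reused.
 */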
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes)
{
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * !helper->fb means that this function is being called for the
	 * first time; afterwards helper->fb is reused for clone mode.
	 */
	if (!helper->fb) {
		ret = exynos_drm_fbdev_create(helper, sizes);
		if (ret < 0) {
			DRM_ERROR("failed to create fbdev.\n");
			return ret;
		}

		/*
		 * fb_helper expects a return value of 1 or more on success
		 * so that register_framebuffer() gets called.
		 */
		ret = 1;
	}

	return ret;
}

static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_probe,
};

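/*
 * Create the drm_fb_helper for this device, attach it to all available
 * connectors and let the helper pick an initial configuration with
 * PREFERRED_BPP.
 */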
int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev) {
		DRM_ERROR("failed to allocate drm fbdev.\n");
		return -ENOMEM;
	}

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;
	}

	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

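/*
 * Tear down the fbdev: drop the kernel mapping obtained with vmap(),
 * remove the drm framebuffer and unregister/release the fb_info.
 */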
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct drm_framebuffer *fb;

	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
		vunmap(exynos_gem_obj->buffer->kvaddr);

	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
		if (fb)
			drm_framebuffer_remove(fb);
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

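/* Destroy the fbdev GEM object and free the exynos_drm_fbdev wrapper. */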
void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

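/* Restore the fbdev mode, typically on lastclose of the drm device. */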
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
}