/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"

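/*
 * Legacy-format 3D capability record, used to report guest-backed device
 * capabilities to user-space clients that have not declared themselves
 * guest-backed-object (GB) aware.
 */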
struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

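/**
 * vmw_getparam_ioctl - Report a single device parameter to user-space.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_getparam_arg.
 * @file_priv: Identifies the calling client.
 *
 * Fills in the value member of the argument according to the requested
 * DRM_VMW_PARAM_* parameter.
 */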
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
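	/*
	 * With guest-backed objects the FIFO 3D hardware version register
	 * is not consulted; a fixed version is reported instead.
	 */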
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		param->value =
			ioread32(fifo_mem +
				 ((fifo->capabilities &
				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				  SVGA_FIFO_3D_HWVERSION_REVISED :
				  SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
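	/*
	 * Querying the maximum MOB memory implies that the client
	 * understands guest-backed objects, so mark it GB-aware.
	 */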
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}

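/*
 * vmw_fill_compat_cap - Pack device capabilities into a legacy record.
 *
 * Reads each capability through the indexed SVGA_REG_DEV_CAP register and
 * stores (index, value) pairs in the caller-supplied bounce buffer,
 * clamped to the buffer size and to SVGA3D_DEVCAP_MAX entries.
 */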
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap =
		(struct svga_3d_compat_cap *) bounce;
	unsigned int i;
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int max_size;

	if (size < pair_offset)
		return -EINVAL;

	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

	if (max_size > SVGA3D_DEVCAP_MAX)
		max_size = SVGA3D_DEVCAP_MAX;

	compat_cap->header.length =
		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

	mutex_lock(&dev_priv->hw_mutex);
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return 0;
}

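/**
 * vmw_get_cap_3d_ioctl - Copy the device's 3D capabilities to user-space.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_get_3d_cap_arg.
 * @file_priv: Identifies the calling client.
 *
 * The capabilities are gathered into a bounce buffer, in a format that
 * depends on whether the device has guest-backed objects and whether the
 * client is GB-aware, and then copied out to the user-supplied buffer.
 */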
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	__le32 __iomem *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0)) {
		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

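	/*
	 * GB-aware clients get the raw devcap values read through the
	 * SVGA_REG_DEV_CAP register; legacy clients on GB hardware get a
	 * compat record, and everyone else gets a copy of the FIFO 3D
	 * caps block.
	 */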
	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		mutex_lock(&dev_priv->hw_mutex);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		}
		mutex_unlock(&dev_priv->hw_mutex);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}

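/**
 * vmw_present_ioctl - Present part of a surface to a framebuffer.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_arg.
 * @file_priv: Identifies the calling client.
 *
 * Copies the user-supplied clip rectangles, looks up the destination
 * framebuffer and the source surface, and forwards the operation to
 * vmw_kms_present() under the ttm read lock.
 */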
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_resource_lookup_handle takes one ref, so does the fb lookup */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}

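/**
 * vmw_present_readback_ioctl - Read back from a dmabuf-backed framebuffer.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_readback_arg.
 * @file_priv: Identifies the calling client.
 *
 * Copies the user-supplied clip rectangles and forwards the readback to
 * vmw_kms_readback(), which can signal completion to user-space through
 * the supplied fence representation.
 */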
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->dmabuf) {
		DRM_ERROR("Framebuffer not dmabuf backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}

/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_poll(filp, wait);
}

/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		      size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_read(filp, buffer, count, offset);
}