Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 18 additions & 6 deletions kernel-open/nvidia-drm/nvidia-drm-fence.c
Original file line number Diff line number Diff line change
Expand Up @@ -1390,6 +1390,7 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
NvU64 timeoutMS)
{
struct list_head *pending;
NvU64 fence_seqno;
unsigned long flags;

if (timeoutMS > NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS) {
Expand All @@ -1401,28 +1402,39 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
INIT_LIST_HEAD(&nv_fence->pending_node);

nv_fence->timeout = nv_drm_timeout_from_ms(timeoutMS);
fence_seqno = __nv_drm_get_semsurf_fence_seqno(nv_fence);

spin_lock_irqsave(&ctx->lock, flags);

/*
* Most callers append increasing wait values. Fast-path append in that
* case to avoid scanning the whole list on each fence creation.
*/
if (!list_empty(&ctx->pending_fences)) {
struct nv_drm_semsurf_fence *tail_fence =
list_last_entry(&ctx->pending_fences,
struct nv_drm_semsurf_fence, pending_node);
if (__nv_drm_get_semsurf_fence_seqno(tail_fence) <= fence_seqno) {
list_add_tail(&nv_fence->pending_node, &ctx->pending_fences);
goto added_pending;
}
}

list_for_each(pending, &ctx->pending_fences) {
struct nv_drm_semsurf_fence *pending_fence =
list_entry(pending, typeof(*pending_fence), pending_node);
if (__nv_drm_get_semsurf_fence_seqno(pending_fence) >
__nv_drm_get_semsurf_fence_seqno(nv_fence)) {
if (__nv_drm_get_semsurf_fence_seqno(pending_fence) > fence_seqno) {
/* Inserts 'nv_fence->pending_node' before 'pending' */
list_add_tail(&nv_fence->pending_node, pending);
break;
}
}

if (list_empty(&nv_fence->pending_node)) {
/*
* Inserts 'nv_fence->pending_node' at the end of 'ctx->pending_fences',
* or as the head if the list is empty
*/
list_add_tail(&nv_fence->pending_node, &ctx->pending_fences);
}

added_pending:
/* Fence is live starting... now! */
spin_unlock_irqrestore(&ctx->lock, flags);

Expand Down
2 changes: 1 addition & 1 deletion kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
BUG_ON(dma_buf->size % PAGE_SIZE);

pMemory = NULL;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (drm_core_check_feature(dev, DRIVER_MODESET) && nv_dev->pDevice != NULL) {
pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice,
(NvP64)(NvUPtr)dma_buf,
dma_buf->size - 1);
Expand Down
27 changes: 18 additions & 9 deletions kernel-open/nvidia-drm/nvidia-drm-modeset.c
Original file line number Diff line number Diff line change
Expand Up @@ -136,22 +136,24 @@ static bool __will_generate_flip_event(struct drm_crtc *crtc,

static int __nv_drm_put_back_post_fence_fd(
struct nv_drm_plane_state *plane_state,
struct nv_drm_device *nv_dev,
const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
{
int fd = layer_reply_config->postSyncptFd;
int ret = 0;

if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
ret = copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd));
if (ret != 0) {
return ret;
if (copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd)) != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to copy post fence FD to userspace");
return -EFAULT;
}

/*! Set back to NULL and let set_property specify it again */
plane_state->fd_user_ptr = NULL;
}

return ret;
return 0;
}

struct nv_drm_plane_fence_cb_data {
Expand Down Expand Up @@ -300,7 +302,7 @@ static int __nv_drm_convert_in_fences(
default:
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed plane fence callback registration");
"Failed plane fence callback registration, ret=%d", ret);
/* Fence callback registration failed */
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
nv_drm_free(fence_data);
Expand All @@ -315,7 +317,6 @@ static int __nv_drm_get_syncpt_data(
struct nv_drm_device *nv_dev,
struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state,
struct NvKmsKapiRequestedModeSetConfig *requested_config,
struct NvKmsKapiModeSetReplyConfig *reply_config)
{
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
Expand Down Expand Up @@ -355,6 +356,7 @@ static int __nv_drm_get_syncpt_data(

ret = __nv_drm_put_back_post_fence_fd(
plane_state,
nv_dev,
&head_reply_config->layerReplyConfig[nv_plane->layer_idx]);

if (ret != 0) {
Expand Down Expand Up @@ -488,6 +490,11 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
&reply_config,
commit)) {
if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to apply modeset config (commit=%u, flipResult=%u)",
commit,
reply_config.flipResult);
return -EINVAL;
}
}
Expand All @@ -497,7 +504,7 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
/*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
ret = __nv_drm_get_syncpt_data(
nv_dev, crtc, old_crtc_state, requested_config, &reply_config);
nv_dev, crtc, old_crtc_state, &reply_config);
if (ret != 0) {
return ret;
}
Expand Down Expand Up @@ -611,7 +618,9 @@ static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc)
}
spin_unlock(&dev->event_lock);

wake_up_all(&nv_dev->flip_event_wq);
if (nv_flip != NULL) {
wake_up_all(&nv_dev->flip_event_wq);
}

nv_drm_free(nv_flip);
}
Expand Down