drivers/gpu/drm/drm_modeset_helper.c (16 additions, 3 deletions)
@@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
 
 	if (!dev)
 		return 0;
+	/*
+	 * Don't disable polling if it was never initialized
+	 */
+	if (dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_disable(dev);
 
-	drm_kms_helper_poll_disable(dev);
 	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
 	state = drm_atomic_helper_suspend(dev);
 	if (IS_ERR(state)) {
 		drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
-		drm_kms_helper_poll_enable(dev);
+		/*
+		 * Don't enable polling if it was never initialized
+		 */
+		if (dev->mode_config.poll_enabled)
+			drm_kms_helper_poll_enable(dev);
+
 		return PTR_ERR(state);
 	}
 
@@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
 	dev->mode_config.suspend_state = NULL;
 
 	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
-	drm_kms_helper_poll_enable(dev);
+	/*
+	 * Don't enable polling if it is not initialized
+	 */
+	if (dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(dev);
 
 	return ret;
 }
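Whether the new checks in drm_mode_config_helper_suspend()/_resume() ever skip anything depends on whether the driver set up output polling in the first place. A minimal sketch of that dependency, assuming a hypothetical "foo" driver (none of this is part of the patch):

static int foo_kms_init(struct drm_device *drm)
{
	/* ... drmm_mode_config_init(), CRTC/encoder/connector setup ... */

	/* drm_kms_helper_poll_init() is what sets
	 * drm->mode_config.poll_enabled. A driver whose connectors all have
	 * working hotplug interrupts may never call it, and for such a
	 * device the suspend/resume helpers above must leave the poll
	 * worker alone, which is what the added checks ensure.
	 */
	drm_kms_helper_poll_init(drm);

	return 0;
}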
drivers/gpu/drm/drm_probe_helper.c (11 additions, 2 deletions)
@@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev)
  * Drivers can call this helper from their device resume implementation. It is
  * not an error to call this even when output polling isn't enabled.
  *
+ * If device polling was never initialized before, this call will trigger a
+ * warning and return.
+ *
  * Note that calls to enable and disable polling must be strictly ordered, which
  * is automatically the case when they're only call from suspend/resume
  * callbacks.
  */
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
-	    dev->mode_config.poll_running)
+	if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) ||
+	    !drm_kms_helper_poll || dev->mode_config.poll_running)
 		return;
 
 	if (drm_kms_helper_enable_hpd(dev) ||
@@ -871,12 +874,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
  * not an error to call this even when output polling isn't enabled or already
  * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
  *
+ * If however, the polling was never initialized, this call will trigger a
+ * warning and return
+ *
  * Note that calls to enable and disable polling must be strictly ordered, which
  * is automatically the case when they're only call from suspend/resume
  * callbacks.
  */
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
+	if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled))
+		return;
+
 	if (dev->mode_config.poll_running)
 		drm_kms_helper_disable_hpd(dev);
 
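The "strictly ordered" note in both kernel-doc comments is about keeping enable and disable calls paired. A hedged sketch of how that pairing typically looks in a driver's suspend/resume path (hypothetical "foo" driver, not taken from this series):

static int foo_suspend(struct drm_device *drm)
{
	/* With this patch, drm_kms_helper_poll_disable() warns (drm_WARN_ON)
	 * and returns early if polling was never initialized; the enable side
	 * warns only once (drm_WARN_ON_ONCE).
	 */
	drm_kms_helper_poll_disable(drm);

	/* ... disable outputs, save state, power down ... */
	return 0;
}

static int foo_resume(struct drm_device *drm)
{
	/* ... power up, restore state ... */

	/* Must pair with the disable above, which is the ordering the
	 * kernel-doc comments require.
	 */
	drm_kms_helper_poll_enable(drm);
	return 0;
}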
drivers/gpu/drm/scheduler/sched_main.c (7 additions, 6 deletions)
@@ -775,13 +775,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
 	dma_resv_assert_held(resv);
 
 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
-		/* Make sure to grab an additional ref on the added fence */
-		dma_fence_get(fence);
-		ret = drm_sched_job_add_dependency(job, fence);
-		if (ret) {
-			dma_fence_put(fence);
+		/*
+		 * As drm_sched_job_add_dependency always consumes the fence
+		 * reference (even when it fails), and dma_resv_for_each_fence
+		 * is not obtaining one, we need to grab one before calling.
+		 */
+		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
+		if (ret)
 			return ret;
-		}
 	}
 	return 0;
 }
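The rewritten loop hinges on one ownership rule: drm_sched_job_add_dependency() consumes the fence reference it is given, whether it succeeds or fails. A short sketch of what that means for a caller that only borrows a fence (hypothetical helper, not part of the patch):

static int foo_add_wait(struct drm_sched_job *job, struct dma_fence *fence)
{
	/* dma_fence_get() bumps the refcount and returns the same pointer.
	 * From this point the reference belongs to
	 * drm_sched_job_add_dependency(), which either keeps it in the job or
	 * drops it on error, so no dma_fence_put() is needed on any path here.
	 */
	return drm_sched_job_add_dependency(job, dma_fence_get(fence));
}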
drivers/net/ethernet/aquantia/atlantic/aq_ring.c (5 additions, 0 deletions)
@@ -576,6 +576,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
 
 		if (!buff->is_eop) {
 			unsigned int frag_cnt = 0U;
+
+			/* There will be an extra fragment */
+			if (buff->len > AQ_CFG_RX_HDR_SIZE)
+				frag_cnt++;
+
 			buff_ = buff;
 			do {
 				bool is_rsc_completed = true;
fs/smb/client/readdir.c (2 additions, 2 deletions)
@@ -786,11 +786,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 			rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
 							 search_flags,
 							 &cfile->srch_inf);
+			if (rc)
+				return -ENOENT;
 			/* FindFirst/Next set last_entry to NULL on malformed reply */
 			if (cfile->srch_inf.last_entry)
 				cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
-			if (rc)
-				return -ENOENT;
 		}
 		if (index_to_find < cfile->srch_inf.index_of_last_entry) {
 			/* we found the buffer that contains the entry */
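The reorder encodes a simple rule: check the return code before consuming any output of the call, because on failure srch_inf.last_entry may still describe the previous, now-stale reply. A generic sketch of the pattern with hypothetical names (the foo_* helpers are illustrative, not cifs code):

struct foo_dir_ctx {
	void *last_entry;	/* transport sets this to NULL on malformed replies */
};

int foo_query_dir_next(struct foo_dir_ctx *ctx);			/* hypothetical */
void foo_save_resume_key(struct foo_dir_ctx *ctx, void *entry);	/* hypothetical */

static int foo_fetch_next_batch(struct foo_dir_ctx *ctx)
{
	int rc = foo_query_dir_next(ctx);

	if (rc)
		return -ENOENT;	/* bail out before touching ctx->last_entry */

	/* Only a successful reply is allowed to update the resume key. */
	if (ctx->last_entry)
		foo_save_resume_key(ctx, ctx->last_entry);

	return 0;
}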
net/mptcp/protocol.c (12 additions, 7 deletions)
@@ -930,14 +930,19 @@ static void mptcp_reset_rtx_timer(struct sock *sk)
 
 bool mptcp_schedule_work(struct sock *sk)
 {
-	if (inet_sk_state_load(sk) != TCP_CLOSE &&
-	    schedule_work(&mptcp_sk(sk)->work)) {
-		/* each subflow already holds a reference to the sk, and the
-		 * workqueue is invoked by a subflow, so sk can't go away here.
-		 */
-		sock_hold(sk);
+	if (inet_sk_state_load(sk) == TCP_CLOSE)
+		return false;
+
+	/* Get a reference on this socket, mptcp_worker() will release it.
+	 * As mptcp_worker() might complete before us, we can not avoid
+	 * a sock_hold()/sock_put() if schedule_work() returns false.
+	 */
+	sock_hold(sk);
+
+	if (schedule_work(&mptcp_sk(sk)->work))
 		return true;
-	}
+
+	sock_put(sk);
 	return false;
 }
 
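The ordering matters because mptcp_worker() can run, drop its reference and free the socket before schedule_work() even returns to the caller; taking the reference first closes that window, at the cost of a hold/put pair when the work was already pending. A hedged sketch of the general take-a-reference-then-queue pattern (hypothetical "foo" object, not mptcp code):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_obj {
	refcount_t ref;
	struct work_struct work;
};

static void foo_put(struct foo_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);
}

static bool foo_schedule(struct foo_obj *obj)
{
	/* Reference for the worker, taken before queueing so the object
	 * cannot disappear between a successful schedule_work() and our
	 * return.
	 */
	refcount_inc(&obj->ref);

	if (schedule_work(&obj->work))
		return true;	/* the worker now owns the reference */

	/* Work was already pending; that instance holds its own reference,
	 * so give ours back.
	 */
	foo_put(obj);
	return false;
}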
net/vmw_vsock/af_vsock.c (31 additions, 9 deletions)
@@ -1507,18 +1507,40 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
 		timeout = schedule_timeout(timeout);
 		lock_sock(sk);
 
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeout);
-			sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
-			sock->state = SS_UNCONNECTED;
-			vsock_transport_cancel_pkt(vsk);
-			vsock_remove_connected(vsk);
-			goto out_wait;
-		} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
-			err = -ETIMEDOUT;
+		/* Connection established. Whatever happens to socket once we
+		 * release it, that's not connect()'s concern. No need to go
+		 * into signal and timeout handling. Call it a day.
+		 *
+		 * Note that allowing to "reset" an already established socket
+		 * here is racy and insecure.
+		 */
+		if (sk->sk_state == TCP_ESTABLISHED)
+			break;
+
+		/* If connection was _not_ established and a signal/timeout came
+		 * to be, we want the socket's state reset. User space may want
+		 * to retry.
+		 *
+		 * sk_state != TCP_ESTABLISHED implies that socket is not on
+		 * vsock_connected_table. We keep the binding and the transport
+		 * assigned.
+		 */
+		if (signal_pending(current) || timeout == 0) {
+			err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout);
+
+			/* Listener might have already responded with
+			 * VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our
+			 * sk_state == TCP_SYN_SENT, which hereby we break.
+			 * In such case VIRTIO_VSOCK_OP_RST will follow.
+			 */
 			sk->sk_state = TCP_CLOSE;
 			sock->state = SS_UNCONNECTED;
+
+			/* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
+			 * transport->connect().
+			 */
 			vsock_transport_cancel_pkt(vsk);
+
 			goto out_wait;
 		}
 
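From user space, the effect of resetting the socket to TCP_CLOSE/SS_UNCONNECTED while keeping the binding and the assigned transport is that an interrupted or timed-out connect() can simply be retried on the same file descriptor. A hedged user-space sketch (helper name and retry policy are illustrative, not part of the patch):

#include <errno.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

static int vsock_connect_retry(int fd, const struct sockaddr_vm *addr, int tries)
{
	while (tries-- > 0) {
		if (connect(fd, (const struct sockaddr *)addr, sizeof(*addr)) == 0)
			return 0;

		/* With the kernel-side reset above, EINTR/ETIMEDOUT leave the
		 * fd bound and reusable for another attempt.
		 */
		if (errno != EINTR && errno != ETIMEDOUT)
			break;
	}
	return -1;
}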