diff --git a/Makefile b/Makefile
index b5f32ce..7200160 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 22
-EXTRAVERSION = .21-op1
+EXTRAVERSION = .22-op1
 NAME = Holy Dancing Manatees, Batman!
 
 # *DOCUMENTATION*
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 8617298..e8fcce7 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -843,7 +843,7 @@ static inline int de_is_running (struct de_private *de)
 static void de_stop_rxtx (struct de_private *de)
 {
 	u32 macmode;
-	unsigned int work = 1000;
+	unsigned int i = 1300/100;
 
 	macmode = dr32(MacMode);
 	if (macmode & RxTx) {
@@ -851,10 +851,14 @@ static void de_stop_rxtx (struct de_private *de)
 		dr32(MacMode);
 	}
 
-	while (--work > 0) {
+	/* wait until in-flight frame completes.
+	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
+	 * Typically expect this loop to end in < 50 us on 100BT.
+	 */
+	while (--i) {
 		if (!de_is_running(de))
 			return;
-		cpu_relax();
+		udelay(100);
 	}
 	printk(KERN_WARNING "%s: timeout expired stopping DMA\n",
 	       de->dev->name);
diff --git a/fs/aio.c b/fs/aio.c
index dbe699e..b3419c5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -303,7 +303,7 @@ static void wait_for_all_aios(struct kioctx *ctx)
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (ctx->reqs_active) {
 		spin_unlock_irq(&ctx->ctx_lock);
-		schedule();
+		io_schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		spin_lock_irq(&ctx->ctx_lock);
 	}
@@ -323,7 +323,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!iocb->ki_users)
 			break;
-		schedule();
+		io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
 	return iocb->ki_user_data;
@@ -946,14 +946,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 		return 1;
 	}
 
-	/*
-	 * Check if the user asked us to deliver the result through an
-	 * eventfd. The eventfd_signal() function is safe to be called
-	 * from IRQ context.
-	 */
-	if (!IS_ERR(iocb->ki_eventfd))
-		eventfd_signal(iocb->ki_eventfd, 1);
-
 	info = &ctx->ring_info;
 
 	/* add a completion event to the ring buffer.
@@ -1002,6 +994,15 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+
+	/*
+	 * Check if the user asked us to deliver the result through an
+	 * eventfd. The eventfd_signal() function is safe to be called
+	 * from IRQ context.
+	 */
+	if (!IS_ERR(iocb->ki_eventfd))
+		eventfd_signal(iocb->ki_eventfd, 1);
+
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
@@ -1170,7 +1171,7 @@ retry:
 			ret = 0;
 			if (to.timed_out)	/* Only check after read evt */
 				break;
-			schedule();
+			io_schedule();
 			if (signal_pending(tsk)) {
 				ret = -EINTR;
 				break;
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e73aa0..c54dc50 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1407,9 +1407,6 @@ void d_delete(struct dentry * dentry)
 	if (atomic_read(&dentry->d_count) == 1) {
 		dentry_iput(dentry);
 		fsnotify_nameremove(dentry, isdir);
-
-		/* remove this and other inotify debug checks after 2.6.18 */
-		dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
 		return;
 	}
 
diff --git a/fs/inotify.c b/fs/inotify.c
index 7457501..8ee2b43 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
 		struct dentry *child;
 
 		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
-			if (!child->d_inode) {
-				WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
+			if (!child->d_inode)
 				continue;
-			}
+
 			spin_lock(&child->d_lock);
-			if (watched) {
-				WARN_ON(child->d_flags &
-						DCACHE_INOTIFY_PARENT_WATCHED);
+			if (watched)
 				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
-			} else {
-				WARN_ON(!(child->d_flags &
-					DCACHE_INOTIFY_PARENT_WATCHED));
-				child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED;
-			}
+			else
+				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
 			spin_unlock(&child->d_lock);
 		}
 	}
@@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
 	if (!inode)
 		return;
 
-	WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
 	spin_lock(&entry->d_lock);
 	parent = entry->d_parent;
 	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
@@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
 		      struct inode *inode, u32 mask)
 {
 	int ret = 0;
+	int newly_watched;
 
 	/* don't allow invalid bits: we don't want flags set */
 	mask &= IN_ALL_EVENTS | IN_ONESHOT;
@@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
 	 */
 	watch->inode = igrab(inode);
 
-	if (!inotify_inode_watched(inode))
-		set_dentry_child_flags(inode, 1);
-
 	/* Add the watch to the handle's and the inode's list */
+	newly_watched = !inotify_inode_watched(inode);
 	list_add(&watch->h_list, &ih->watches);
 	list_add(&watch->i_list, &inode->inotify_watches);
+	/*
+	 * Set child flags _after_ adding the watch, so there is no race
+	 * window where newly instantiated children could miss their parent's
+	 * watched flag.
+	 */
+	if (newly_watched)
+		set_dentry_child_flags(inode, 1);
+
 out:
 	mutex_unlock(&ih->mutex);
 	mutex_unlock(&inode->inotify_mutex);
diff --git a/fs/splice.c b/fs/splice.c
index dbbe267..3da87fe 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/syscalls.h>
 #include <linux/uio.h>
+#include <linux/security.h>
 
 struct partial_page {
 	unsigned int offset;
@@ -331,7 +332,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 				break;
 
 			error = add_to_page_cache_lru(page, mapping, index,
-					      GFP_KERNEL);
+					      mapping_gfp_mask(mapping));
 			if (unlikely(error)) {
 				page_cache_release(page);
 				if (error == -EEXIST)
@@ -932,6 +933,10 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
 	if (unlikely(ret < 0))
 		return ret;
 
+	ret = security_file_permission(out, MAY_WRITE);
+	if (unlikely(ret < 0))
+		return ret;
+
 	return out->f_op->splice_write(pipe, out, ppos, len, flags);
 }
 
@@ -954,6 +959,10 @@ static long do_splice_to(struct file *in, loff_t *ppos,
 	if (unlikely(ret < 0))
 		return ret;
 
+	ret = security_file_permission(in, MAY_READ);
+	if (unlikely(ret < 0))
+		return ret;
+
 	return in->f_op->splice_read(in, ppos, pipe, len, flags);
 }
 
@@ -1489,6 +1498,13 @@ static int link_pipe(struct pipe_inode_info *ipipe,
 		i++;
 	} while (len);
 
+	/*
+	 * return EAGAIN if we have the potential of some data in the
+	 * future, otherwise just return 0
+	 */
+	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
+		ret = -EAGAIN;
+
 	inode_double_unlock(ipipe->inode, opipe->inode);
 
 	/*
@@ -1529,11 +1545,8 @@ static long do_tee(struct file *in, struct file *out, size_t len,
 		ret = link_ipipe_prep(ipipe, flags);
 		if (!ret) {
 			ret = link_opipe_prep(opipe, flags);
-			if (!ret) {
+			if (!ret)
 				ret = link_pipe(ipipe, opipe, len, flags);
-				if (!ret && (flags & SPLICE_F_NONBLOCK))
-					ret = -EAGAIN;
-			}
 		}
 	}
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index eee57e6..de6e5df 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -246,7 +246,7 @@ static u16 tcp_select_window(struct sock *sk)
 		 *
 		 * Relax Will Robinson.
 		 */
-		new_win = cur_win;
+		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 	}
 	tp->rcv_wnd = new_win;
 	tp->rcv_wup = tp->rcv_nxt;