Message-Id: <3070c172c135414c95697009f2876e70981ce563.1399973152.git.jslaby@suse.cz>
Date: Tue, 13 May 2014 11:28:38 +0200
From: Jiri Slaby <jslaby@...e.cz>
To: stable@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
Mikulas Patocka <mpatocka@...hat.com>,
Tomi Valkeinen <tomi.valkeinen@...com>,
Jiri Slaby <jslaby@...e.cz>
Subject: [PATCH 3.12 166/182] tgafb: fix data copying
From: Mikulas Patocka <mpatocka@...hat.com>
3.12-stable review patch. If anyone has any objections, please let me know.
===============
commit 6b0df6827bb6fcacb158dff29ad0a62d6418b534 upstream.
The data-copying functions copyarea_foreward_8bpp and
copyarea_backward_8bpp are buggy; they produce screen corruption.
This patch fixes them by moving the logic into a single function,
"copyarea_8bpp". For simplicity, the new function only handles copies
that are aligned on 8 pixels. For unaligned copies, the generic
function cfb_copyarea is used.
Signed-off-by: Mikulas Patocka <mpatocka@...hat.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@...com>
Signed-off-by: Jiri Slaby <jslaby@...e.cz>
---
drivers/video/tgafb.c | 264 ++++++++++----------------------------------------
1 file changed, 51 insertions(+), 213 deletions(-)
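For reference, a minimal stand-alone sketch of the alignment test that now
gates the accelerated 8bpp path (the helper name below is hypothetical and
not part of the patch): any copy whose destination x, source x or width is
not a multiple of 8 pixels is handed off to the generic cfb_copyarea().

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the check at the top of copyarea_8bpp(). */
static bool tga_copy_is_aligned_8px(uint32_t dx, uint32_t sx, uint32_t width)
{
	/* A set low bit in dx, sx or width means the span is not aligned
	 * on 8 pixels, so the driver falls back to cfb_copyarea(). */
	return ((dx | sx | width) & 7) == 0;
}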
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index c9c8e5a1fdee..ba77f753649c 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1142,222 +1142,57 @@ copyarea_line_32bpp(struct fb_info *info, u32 dy, u32 sy,
__raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
}
-/* The general case of forward copy in 8bpp mode. */
+/* The (almost) general case of backward copy in 8bpp mode. */
static inline void
-copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
- u32 height, u32 width, u32 line_length)
+copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+ u32 height, u32 width, u32 line_length,
+ const struct fb_copyarea *area)
{
struct tga_par *par = (struct tga_par *) info->par;
- unsigned long i, copied, left;
- unsigned long dpos, spos, dalign, salign, yincr;
- u32 smask_first, dmask_first, dmask_last;
- int pixel_shift, need_prime, need_second;
- unsigned long n64, n32, xincr_first;
+ unsigned i, yincr;
+ int depos, sepos, backward, last_step, step;
+ u32 mask_last;
+ unsigned n32;
void __iomem *tga_regs;
void __iomem *tga_fb;
- yincr = line_length;
- if (dy > sy) {
- dy += height - 1;
- sy += height - 1;
- yincr = -yincr;
- }
-
- /* Compute the offsets and alignments in the frame buffer.
- More than anything else, these control how we do copies. */
- dpos = dy * line_length + dx;
- spos = sy * line_length + sx;
- dalign = dpos & 7;
- salign = spos & 7;
- dpos &= -8;
- spos &= -8;
-
- /* Compute the value for the PIXELSHIFT register. This controls
- both non-co-aligned source and destination and copy direction. */
- if (dalign >= salign)
- pixel_shift = dalign - salign;
- else
- pixel_shift = 8 - (salign - dalign);
-
- /* Figure out if we need an additional priming step for the
- residue register. */
- need_prime = (salign > dalign);
- if (need_prime)
- dpos -= 8;
-
- /* Begin by copying the leading unaligned destination. Copy enough
- to make the next destination address 32-byte aligned. */
- copied = 32 - (dalign + (dpos & 31));
- if (copied == 32)
- copied = 0;
- xincr_first = (copied + 7) & -8;
- smask_first = dmask_first = (1ul << copied) - 1;
- smask_first <<= salign;
- dmask_first <<= dalign + need_prime*8;
- if (need_prime && copied > 24)
- copied -= 8;
- left = width - copied;
-
- /* Care for small copies. */
- if (copied > width) {
- u32 t;
- t = (1ul << width) - 1;
- t <<= dalign + need_prime*8;
- dmask_first &= t;
- left = 0;
- }
-
- /* Attempt to use 64-byte copies. This is only possible if the
- source and destination are co-aligned at 64 bytes. */
- n64 = need_second = 0;
- if ((dpos & 63) == (spos & 63)
- && (height == 1 || line_length % 64 == 0)) {
- /* We may need a 32-byte copy to ensure 64 byte alignment. */
- need_second = (dpos + xincr_first) & 63;
- if ((need_second & 32) != need_second)
- printk(KERN_ERR "tgafb: need_second wrong\n");
- if (left >= need_second + 64) {
- left -= need_second;
- n64 = left / 64;
- left %= 64;
- } else
- need_second = 0;
- }
-
- /* Copy trailing full 32-byte sections. This will be the main
- loop if the 64 byte loop can't be used. */
- n32 = left / 32;
- left %= 32;
-
- /* Copy the trailing unaligned destination. */
- dmask_last = (1ul << left) - 1;
-
- tga_regs = par->tga_regs_base;
- tga_fb = par->tga_fb_base;
-
- /* Set up the MODE and PIXELSHIFT registers. */
- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG);
- __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG);
- wmb();
-
- for (i = 0; i < height; ++i) {
- unsigned long j;
- void __iomem *sfb;
- void __iomem *dfb;
-
- sfb = tga_fb + spos;
- dfb = tga_fb + dpos;
- if (dmask_first) {
- __raw_writel(smask_first, sfb);
- wmb();
- __raw_writel(dmask_first, dfb);
- wmb();
- sfb += xincr_first;
- dfb += xincr_first;
- }
-
- if (need_second) {
- __raw_writel(0xffffffff, sfb);
- wmb();
- __raw_writel(0xffffffff, dfb);
- wmb();
- sfb += 32;
- dfb += 32;
- }
-
- if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63))
- printk(KERN_ERR
- "tgafb: misaligned copy64 (s:%p, d:%p)\n",
- sfb, dfb);
-
- for (j = 0; j < n64; ++j) {
- __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
- wmb();
- __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
- wmb();
- sfb += 64;
- dfb += 64;
- }
-
- for (j = 0; j < n32; ++j) {
- __raw_writel(0xffffffff, sfb);
- wmb();
- __raw_writel(0xffffffff, dfb);
- wmb();
- sfb += 32;
- dfb += 32;
- }
-
- if (dmask_last) {
- __raw_writel(0xffffffff, sfb);
- wmb();
- __raw_writel(dmask_last, dfb);
- wmb();
- }
-
- spos += yincr;
- dpos += yincr;
+ /* Do acceleration only if we are aligned on 8 pixels */
+ if ((dx | sx | width) & 7) {
+ cfb_copyarea(info, area);
+ return;
}
- /* Reset the MODE register to normal. */
- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
-}
-
-/* The (almost) general case of backward copy in 8bpp mode. */
-static inline void
-copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
- u32 height, u32 width, u32 line_length,
- const struct fb_copyarea *area)
-{
- struct tga_par *par = (struct tga_par *) info->par;
- unsigned long i, left, yincr;
- unsigned long depos, sepos, dealign, sealign;
- u32 mask_first, mask_last;
- unsigned long n32;
- void __iomem *tga_regs;
- void __iomem *tga_fb;
-
yincr = line_length;
if (dy > sy) {
dy += height - 1;
sy += height - 1;
yincr = -yincr;
}
+ backward = dy == sy && dx > sx && dx < sx + width;
/* Compute the offsets and alignments in the frame buffer.
More than anything else, these control how we do copies. */
- depos = dy * line_length + dx + width;
- sepos = sy * line_length + sx + width;
- dealign = depos & 7;
- sealign = sepos & 7;
-
- /* ??? The documentation appears to be incorrect (or very
- misleading) wrt how pixel shifting works in backward copy
- mode, i.e. when PIXELSHIFT is negative. I give up for now.
- Do handle the common case of co-aligned backward copies,
- but frob everything else back on generic code. */
- if (dealign != sealign) {
- cfb_copyarea(info, area);
- return;
- }
-
- /* We begin the copy with the trailing pixels of the
- unaligned destination. */
- mask_first = (1ul << dealign) - 1;
- left = width - dealign;
-
- /* Care for small copies. */
- if (dealign > width) {
- mask_first ^= (1ul << (dealign - width)) - 1;
- left = 0;
- }
+ depos = dy * line_length + dx;
+ sepos = sy * line_length + sx;
+ if (backward)
+ depos += width, sepos += width;
/* Next copy full words at a time. */
- n32 = left / 32;
- left %= 32;
+ n32 = width / 32;
+ last_step = width % 32;
/* Finally copy the unaligned head of the span. */
- mask_last = -1 << (32 - left);
+ mask_last = (1ul << last_step) - 1;
+
+ if (!backward) {
+ step = 32;
+ last_step = 32;
+ } else {
+ step = -32;
+ last_step = -last_step;
+ sepos -= 32;
+ depos -= 32;
+ }
tga_regs = par->tga_regs_base;
tga_fb = par->tga_fb_base;
@@ -1374,25 +1209,33 @@ copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
sfb = tga_fb + sepos;
dfb = tga_fb + depos;
- if (mask_first) {
- __raw_writel(mask_first, sfb);
- wmb();
- __raw_writel(mask_first, dfb);
- wmb();
- }
- for (j = 0; j < n32; ++j) {
- sfb -= 32;
- dfb -= 32;
+ for (j = 0; j < n32; j++) {
+ if (j < 2 && j + 1 < n32 && !backward &&
+ !(((unsigned long)sfb | (unsigned long)dfb) & 63)) {
+ do {
+ __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
+ wmb();
+ __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
+ wmb();
+ sfb += 64;
+ dfb += 64;
+ j += 2;
+ } while (j + 1 < n32);
+ j--;
+ continue;
+ }
__raw_writel(0xffffffff, sfb);
wmb();
__raw_writel(0xffffffff, dfb);
wmb();
+ sfb += step;
+ dfb += step;
}
if (mask_last) {
- sfb -= 32;
- dfb -= 32;
+ sfb += last_step - step;
+ dfb += last_step - step;
__raw_writel(mask_last, sfb);
wmb();
__raw_writel(mask_last, dfb);
@@ -1453,14 +1296,9 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
else if (bpp == 32)
cfb_copyarea(info, area);
- /* Detect overlapping source and destination that requires
- a backward copy. */
- else if (dy == sy && dx > sx && dx < sx + width)
- copyarea_backward_8bpp(info, dx, dy, sx, sy, height,
- width, line_length, area);
else
- copyarea_foreward_8bpp(info, dx, dy, sx, sy, height,
- width, line_length);
+ copyarea_8bpp(info, dx, dy, sx, sy, height,
+ width, line_length, area);
}
--
1.9.3