Commit a3b196f6 authored by Bryce Hepner

Bib change

parent c2f2988f
Pipeline #2572 passed with stage
in 7 seconds
...@@ -50,29 +50,24 @@ ...@@ -50,29 +50,24 @@
\@writefile{brf}{\backcite{Huffman}{{3}{3}{section.3}}} \@writefile{brf}{\backcite{Huffman}{{3}{3}{section.3}}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Encoding the Pixel Values\relax }}{3}{figure.caption.2}\protected@file@percent } \@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Encoding the Pixel Values\relax }}{3}{figure.caption.2}\protected@file@percent }
\newlabel{fig:sub1}{{2}{3}{Encoding the Pixel Values\relax }{figure.caption.2}{}} \newlabel{fig:sub1}{{2}{3}{Encoding the Pixel Values\relax }{figure.caption.2}{}}
\@writefile{brf}{\backcite{Numpy}{{3}{3}{figure.caption.3}}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Encoding the Error Values\relax }}{3}{figure.caption.3}\protected@file@percent } \@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Encoding the Error Values\relax }}{3}{figure.caption.3}\protected@file@percent }
\newlabel{fig:sub2}{{3}{3}{Encoding the Error Values\relax }{figure.caption.3}{}} \newlabel{fig:sub2}{{3}{3}{Encoding the Error Values\relax }{figure.caption.3}{}}
\@writefile{brf}{\backcite{Numpy}{{3}{3}{figure.caption.3}}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Results}{3}{section.4}\protected@file@percent } \@writefile{toc}{\contentsline {section}{\numberline {4}Results}{3}{section.4}\protected@file@percent }
\citation{Numpy}
\citation{LAPACKAlgorithms} \citation{LAPACKAlgorithms}
\citation{DBLP:journals/corr/abs-2010-05846}
\citation{LeastSquaredProblem} \citation{LeastSquaredProblem}
\bibstyle{ieee} \bibstyle{ieee}
\bibdata{main} \bibdata{main}
\bibcite{ABRARDO1997321}{1} \bibcite{ABRARDO1997321}{1}
\bibcite{AIAZZI20021619}{2} \bibcite{AIAZZI20021619}{2}
\bibcite{DBLP:journals/corr/abs-2010-05846}{3} \bibcite{LeastSquaredProblem}{3}
\bibcite{LeastSquaredProblem}{4} \bibcite{LAPACKAlgorithms}{4}
\bibcite{LAPACKAlgorithms}{5} \bibcite{Dahlen1993}{5}
\bibcite{Dahlen1993}{6} \bibcite{PNGdetails}{6}
\bibcite{PNGdetails}{7} \bibcite{Numpy}{7}
\bibcite{Numpy}{8} \bibcite{Huffman}{8}
\bibcite{Huffman}{9} \bibcite{PNGoverview}{9}
\bibcite{PNGoverview}{10} \bibcite{LZW}{10}
\bibcite{LZW}{11}
\@writefile{toc}{\contentsline {section}{\numberline {5}Discussion}{4}{section.5}\protected@file@percent } \@writefile{toc}{\contentsline {section}{\numberline {5}Discussion}{4}{section.5}\protected@file@percent }
\@writefile{brf}{\backcite{Numpy}{{4}{5}{section.5}}}
\@writefile{brf}{\backcite{LAPACKAlgorithms}{{4}{5}{section.5}}} \@writefile{brf}{\backcite{LAPACKAlgorithms}{{4}{5}{section.5}}}
\@writefile{brf}{\backcite{DBLP:journals/corr/abs-2010-05846}{{4}{5}{section.5}}}
\@writefile{brf}{\backcite{LeastSquaredProblem}{{4}{5}{section.5}}} \@writefile{brf}{\backcite{LeastSquaredProblem}{{4}{5}{section.5}}}
...@@ -11,11 +11,6 @@ B.~Aiazzi, L.~Alparone, and S.~Baronti. ...@@ -11,11 +11,6 @@ B.~Aiazzi, L.~Alparone, and S.~Baronti.
\newblock Near-lossless image compression by relaxation-labelled prediction. \newblock Near-lossless image compression by relaxation-labelled prediction.
\newblock {\em Signal Processing}, 82(11):1619--1631, 2002. \newblock {\em Signal Processing}, 82(11):1619--1631, 2002.
\bibitem{DBLP:journals/corr/abs-2010-05846}
J.~Alman and V.~V. Williams.
\newblock A refined laser method and faster matrix multiplication.
\newblock {\em CoRR}, abs/2010.05846, 2020.
\bibitem{LeastSquaredProblem} \bibitem{LeastSquaredProblem}
J.~Alman and V.~V. Williams. J.~Alman and V.~V. Williams.
\newblock Algorithm 853: an efficient algorithm for solving rank-deficient \newblock Algorithm 853: an efficient algorithm for solving rank-deficient
......
...@@ -3,44 +3,44 @@ Capacity: max_strings=200000, hash_size=200000, hash_prime=170003 ...@@ -3,44 +3,44 @@ Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
The top-level auxiliary file: main.aux The top-level auxiliary file: main.aux
The style file: ieee.bst The style file: ieee.bst
Database file #1: main.bib Database file #1: main.bib
You've used 11 entries, You've used 10 entries,
2120 wiz_defined-function locations, 2120 wiz_defined-function locations,
583 strings with 6326 characters, 578 strings with 6187 characters,
and the built_in function-call counts, 3869 in all, are: and the built_in function-call counts, 3623 in all, are:
= -- 356 = -- 332
> -- 243 > -- 230
< -- 0 < -- 0
+ -- 100 + -- 94
- -- 86 - -- 82
* -- 308 * -- 295
:= -- 684 := -- 638
add.period$ -- 36 add.period$ -- 33
call.type$ -- 11 call.type$ -- 10
change.case$ -- 76 change.case$ -- 71
chr.to.int$ -- 0 chr.to.int$ -- 0
cite$ -- 11 cite$ -- 10
duplicate$ -- 107 duplicate$ -- 97
empty$ -- 264 empty$ -- 247
format.name$ -- 86 format.name$ -- 82
if$ -- 769 if$ -- 720
int.to.chr$ -- 0 int.to.chr$ -- 0
int.to.str$ -- 11 int.to.str$ -- 10
missing$ -- 8 missing$ -- 7
newline$ -- 61 newline$ -- 56
num.names$ -- 22 num.names$ -- 20
pop$ -- 72 pop$ -- 69
preamble$ -- 1 preamble$ -- 1
purify$ -- 65 purify$ -- 61
quote$ -- 0 quote$ -- 0
skip$ -- 78 skip$ -- 69
stack$ -- 0 stack$ -- 0
substring$ -- 191 substring$ -- 185
swap$ -- 11 swap$ -- 10
text.length$ -- 0 text.length$ -- 0
text.prefix$ -- 0 text.prefix$ -- 0
top$ -- 0 top$ -- 0
type$ -- 44 type$ -- 40
warning$ -- 0 warning$ -- 0
while$ -- 29 while$ -- 27
width$ -- 13 width$ -- 12
write$ -- 126 write$ -- 115
...@@ -9,7 +9,5 @@ ...@@ -9,7 +9,5 @@
\backcite {Numpy}{{3}{3}{section.3}} \backcite {Numpy}{{3}{3}{section.3}}
\backcite {Huffman}{{3}{3}{section.3}} \backcite {Huffman}{{3}{3}{section.3}}
\backcite {Numpy}{{3}{3}{figure.caption.3}} \backcite {Numpy}{{3}{3}{figure.caption.3}}
\backcite {Numpy}{{4}{5}{section.5}}
\backcite {LAPACKAlgorithms}{{4}{5}{section.5}} \backcite {LAPACKAlgorithms}{{4}{5}{section.5}}
\backcite {DBLP:journals/corr/abs-2010-05846}{{4}{5}{section.5}}
\backcite {LeastSquaredProblem}{{4}{5}{section.5}} \backcite {LeastSquaredProblem}{{4}{5}{section.5}}
This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) (preloaded format=pdflatex 2020.7.20) 27 JUN 2022 15:53 This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) (preloaded format=pdflatex 2020.7.20) 28 JUN 2022 15:11
entering extended mode entering extended mode
restricted \write18 enabled. restricted \write18 enabled.
%&-line parsing enabled. %&-line parsing enabled.
...@@ -413,12 +413,12 @@ Package pdftex.def Info: PixelArrangement.png used on input line 112. ...@@ -413,12 +413,12 @@ Package pdftex.def Info: PixelArrangement.png used on input line 112.
<./PixelArrangement.png (PNG copy)>] [2] <./PixelArrangement.png (PNG copy)>] [2]
<Uniform_No_Title.png, id=84, 462.528pt x 346.896pt> <Uniform_No_Title.png, id=83, 462.528pt x 346.896pt>
File: Uniform_No_Title.png Graphic file (type png) File: Uniform_No_Title.png Graphic file (type png)
<use Uniform_No_Title.png> <use Uniform_No_Title.png>
Package pdftex.def Info: Uniform_No_Title.png used on input line 238. Package pdftex.def Info: Uniform_No_Title.png used on input line 238.
(pdftex.def) Requested size: 237.13594pt x 177.8515pt. (pdftex.def) Requested size: 237.13594pt x 177.8515pt.
<Normal_No_Title.png, id=86, 462.528pt x 346.896pt> <Normal_No_Title.png, id=85, 462.528pt x 346.896pt>
File: Normal_No_Title.png Graphic file (type png) File: Normal_No_Title.png Graphic file (type png)
<use Normal_No_Title.png> <use Normal_No_Title.png>
Package pdftex.def Info: Normal_No_Title.png used on input line 244. Package pdftex.def Info: Normal_No_Title.png used on input line 244.
...@@ -427,9 +427,9 @@ Package pdftex.def Info: Normal_No_Title.png used on input line 244. ...@@ -427,9 +427,9 @@ Package pdftex.def Info: Normal_No_Title.png used on input line 244.
LaTeX Warning: `h' float specifier changed to `ht'. LaTeX Warning: `h' float specifier changed to `ht'.
[3 <./Uniform_No_Title.png> <./Normal_No_Title.png>]
Underfull \hbox (badness 4713) in paragraph at lines 270--282 Underfull \hbox (badness 7273) in paragraph at lines 270--282
[]\OT1/cmr/m/n/10 We are us-ing it on a set of at least 16 []\OT1/cmr/m/n/10 This was tested on a set of a least 16
[] []
...@@ -448,12 +448,12 @@ Underfull \hbox (badness 3428) in paragraph at lines 270--282 ...@@ -448,12 +448,12 @@ Underfull \hbox (badness 3428) in paragraph at lines 270--282
0 3973$ to $0\OML/cmm/m/it/10 :\OT1/cmr/m/n/10 4193$. 0 3973$ to $0\OML/cmm/m/it/10 :\OT1/cmr/m/n/10 4193$.
[] []
[3 <./Uniform_No_Title.png> <./Normal_No_Title.png>] (./main.bbl (./main.brf) (./main.bbl (./main.brf)
\tf@brf=\write4 \tf@brf=\write4
\openout4 = `main.brf'. \openout4 = `main.brf'.
Underfull \hbox (badness 7362) in paragraph at lines 31--31 Underfull \hbox (badness 7362) in paragraph at lines 26--26
\OT1/cmtt/m/n/9 netlib . org / lapack / lug / node71 . html$[][]\OT1/cmr/m/n/9 \OT1/cmtt/m/n/9 netlib . org / lapack / lug / node71 . html$[][]\OT1/cmr/m/n/9
, Oct. 1999. , Oct. 1999.
[] []
...@@ -473,16 +473,16 @@ Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 314. ...@@ -473,16 +473,16 @@ Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 314.
Package rerunfilecheck Info: File `main.out' has not changed. Package rerunfilecheck Info: File `main.out' has not changed.
(rerunfilecheck) Checksum: 90A24BEB086706678095977998C56209;523. (rerunfilecheck) Checksum: 90A24BEB086706678095977998C56209;523.
Package rerunfilecheck Info: File `main.brf' has not changed. Package rerunfilecheck Info: File `main.brf' has not changed.
(rerunfilecheck) Checksum: 88896D3D6F5D891BCB4221968D720CD5;715. (rerunfilecheck) Checksum: BB047529470216DFDC4D0933E0F06F40;613.
LaTeX Font Warning: Some font shapes were not available, defaults substituted. LaTeX Font Warning: Some font shapes were not available, defaults substituted.
) )
Here is how much of TeX's memory you used: Here is how much of TeX's memory you used:
8443 strings out of 481239 8438 strings out of 481239
129230 string characters out of 5920377 129046 string characters out of 5920377
402204 words of memory out of 5000000 403148 words of memory out of 5000000
23521 multiletter control sequences out of 15000+600000 23517 multiletter control sequences out of 15000+600000
541812 words of font info for 57 fonts, out of 8000000 for 9000 541812 words of font info for 57 fonts, out of 8000000 for 9000
1142 hyphenation exceptions out of 8191 1142 hyphenation exceptions out of 8191
47i,9n,42p,782b,389s stack positions out of 5000i,500n,10000p,200000b,80000s 47i,9n,42p,782b,389s stack positions out of 5000i,500n,10000p,200000b,80000s
...@@ -490,21 +490,20 @@ Here is how much of TeX's memory you used: ...@@ -490,21 +490,20 @@ Here is how much of TeX's memory you used:
r/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmbx12.pfb></usr/shar r/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmbx12.pfb></usr/shar
e/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmex10.pfb></usr/share/texl e/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmex10.pfb></usr/share/texl
ive/texmf-dist/fonts/type1/public/amsfonts/cm/cmmi10.pfb></usr/share/texlive/te ive/texmf-dist/fonts/type1/public/amsfonts/cm/cmmi10.pfb></usr/share/texlive/te
xmf-dist/fonts/type1/public/amsfonts/cm/cmmi7.pfb></usr/share/texlive/texmf-dis xmf-dist/fonts/type1/public/amsfonts/cm/cmr10.pfb></usr/share/texlive/texmf-dis
t/fonts/type1/public/amsfonts/cm/cmr10.pfb></usr/share/texlive/texmf-dist/fonts t/fonts/type1/public/amsfonts/cm/cmr12.pfb></usr/share/texlive/texmf-dist/fonts
/type1/public/amsfonts/cm/cmr12.pfb></usr/share/texlive/texmf-dist/fonts/type1/ /type1/public/amsfonts/cm/cmr17.pfb></usr/share/texlive/texmf-dist/fonts/type1/
public/amsfonts/cm/cmr17.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/ public/amsfonts/cm/cmr7.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/a
amsfonts/cm/cmr7.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts msfonts/cm/cmr9.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/
/cm/cmr9.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy cm/cmsy10.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cms
10.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmsy9.pfb> y9.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti10.pfb
</usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti10.pfb></usr/ ></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti9.pfb></usr/
share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti9.pfb></usr/share/t share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt10.pfb></usr/share/
exlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt10.pfb></usr/share/texlive texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt9.pfb>
/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt9.pfb> Output written on main.pdf (4 pages, 247647 bytes).
Output written on main.pdf (4 pages, 255632 bytes).
PDF statistics: PDF statistics:
189 PDF objects out of 1000 (max. 8388607) 180 PDF objects out of 1000 (max. 8388607)
160 compressed objects within 2 object streams 152 compressed objects within 2 object streams
29 named destinations out of 1000 (max. 500000) 28 named destinations out of 1000 (max. 500000)
96 words of extra memory for PDF output out of 10000 (max. 10000000) 96 words of extra memory for PDF output out of 10000 (max. 10000000)
...@@ -105,7 +105,7 @@ Like a cathode-ray tube in a television, the algorithm goes line by line, readin ...@@ -105,7 +105,7 @@ Like a cathode-ray tube in a television, the algorithm goes line by line, readin
Each pixel, as long as it is not on the top or side boundaries, will have 4 neighbors that have already been read into the machine. Each pixel, as long as it is not on the top or side boundaries, will have 4 neighbors that have already been read into the machine.
Those points can be analyzed and interpolated to find the next pixel's value. Those points can be analyzed and interpolated to find the next pixel's value.
The goal is to encode the error between that value and the original value, save that, and use that to compress and decompress the image. The goal is to encode the error between that value and the original value, save that, and use that to compress and decompress the image.
Even though a possibly larger integer may need to be stored, it's more likely that the guess will be correct, or off by a small margin, making the distribution and better for compression. Even though a possibly larger integer may need to be stored, it's more likely that the guess will be correct, or off by a small margin, making the distribution better for compression.
\begin{figure}[h] \begin{figure}[h]
\centering \centering
...@@ -113,11 +113,11 @@ Even though a possibly larger integer may need to be stored, it's more likely th ...@@ -113,11 +113,11 @@ Even though a possibly larger integer may need to be stored, it's more likely th
\caption{\label{fig:pixels}The other 4 pixels are used to find the value of the 5th.} \caption{\label{fig:pixels}The other 4 pixels are used to find the value of the 5th.}
\end{figure} \end{figure}
\subsection{Background} \subsection{Background}
The images that were used in the development of this paper are all thermal images, with values ranging from 19,197 to 25,935. The images that were used in the development of this paper were all thermal images, with values ranging from 19,197 to 25,935.
Total possible values can range from 0 to 32,768. In the system, total possible values can range from 0 to 32,768.
Everything detailed here can still apply to standard grayscale or RGB images, but for testing, only 16 bit thermal images were used.
Most images had ranges of at most 4,096 between the smallest and the largest pixel values. Most images had ranges of at most 4,096 between the smallest and the largest pixel values.
The camera being used has 16 forward facing thermal sensors creating 16 similar thermal images every frame. The camera being used has 16 forward facing thermal sensors creating 16 similar thermal images every frame.
Everything detailed here can still apply to standard grayscale or RGB images, but for testing, only 16 bit thermal images were used.
\section{Related Work} \section{Related Work}
...@@ -129,14 +129,14 @@ For example, if there are two identical blocks of just the color blue, the secon ...@@ -129,14 +129,14 @@ For example, if there are two identical blocks of just the color blue, the secon
Instead of saving two full blocks, the second one just contains the location of the first, telling the decoder to use that block. Instead of saving two full blocks, the second one just contains the location of the first, telling the decoder to use that block.
Huffman encoding is then used to save these numbers, optimizing how the location data is stored. Huffman encoding is then used to save these numbers, optimizing how the location data is stored.
If one pattern is more frequent, the algorithm should optimize over this, producing an even smaller file\cite{PNGdetails}. If one pattern is more frequent, the algorithm should optimize over this, producing an even smaller file\cite{PNGdetails}.
The Huffman encoding portion is what separates LZ77 from ``deflate'', the algorithm summarized here, and the one used in PNG. The Huffman encoding in conjunction with LZ77 helps form ``deflate'', the algorithm summarized here, and the one used in PNG.
Our algorithm has a similar use of Huffman encoding, but a completely different algorithm than LZ77. Our algorithm has a similar use of Huffman encoding, but a completely different algorithm than LZ77.
LZ77 seeks patterns between blocks while ours has no block structure and no explicit pattern functionality. LZ77 seeks patterns between blocks while ours has no block structure and no explicit pattern functionality.
Ours uses the equivalent block size of 1, and instead of encoding the data it encodes alternate information which is used to compress. Ours uses the equivalent block size of 1, and instead of encoding the data it encodes alternate data which is used to compress.
\subsection{LZW} \subsection{LZW}
LZW operates differently by creating a separate code table that maps every sequence to a code. LZW operates differently by creating a separate code table that maps every sequence to a code.
Although this is used for an image, the original paper by Welch \cite{LZW} explains it through text examples which will be done here as well. Although this is used for an image, the original paper by Welch \cite{LZW} explains it through text examples, which will be done here as well.
Instead of looking at each character individually, it looks at variable length string chains and compresses those. Instead of looking at each character individually, it looks at variable length string chains and compresses those.
Passing through the items to be compressed, if a phrase has already been encountered, it saves the reference to the original phrase along with the next character in sequence. Passing through the items to be compressed, if a phrase has already been encountered, it saves the reference to the original phrase along with the next character in sequence.
In this way, the longer repeated phrases are automatically found and can be compressed to be smaller. In this way, the longer repeated phrases are automatically found and can be compressed to be smaller.
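To make the text-example description above concrete, here is a generic sketch of the classic LZW compressor described by Welch; it is illustrative only (plain Python for this note) and is not code from this project.

def lzw_compress(text):
    """Classic LZW: emit the code for the longest phrase already in the
    table, then add that phrase plus the next character as a new entry."""
    table = {chr(i): i for i in range(256)}   # start with single characters
    next_code = 256
    phrase, output = "", []
    for ch in text:
        candidate = phrase + ch
        if candidate in table:
            phrase = candidate                # keep extending the match
        else:
            output.append(table[phrase])      # save reference to the known phrase
            table[candidate] = next_code      # remember the longer phrase
            next_code += 1
            phrase = ch
    if phrase:
        output.append(table[phrase])
    return output

# Repeated phrases collapse quickly once they enter the table:
codes = lzw_compress("abababababab")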
...@@ -144,35 +144,35 @@ This system also uses blocks like PNG in order to save patterns in the data, but ...@@ -144,35 +144,35 @@ This system also uses blocks like PNG in order to save patterns in the data, but
Ours, similarly to PNG, only looks at a short portion of the data, which may have an advantage over LZW for images. Ours, similarly to PNG, only looks at a short portion of the data, which may have an advantage over LZW for images.
Images generally do not have the same patterns that text does, so it may be advantageous to not use the entire corpus in compressing an image and instead only evaluate it based off of nearby objects. Images generally do not have the same patterns that text does, so it may be advantageous to not use the entire corpus in compressing an image and instead only evaluate it based off of nearby objects.
The blue parts of the sky will be next to other blue parts of the sky, and in the realm of thermal images, objects will probably be most similar to nearby ones in temperature due to how heat flows. The blue parts of the sky will be next to other blue parts of the sky, and in the realm of thermal images, temperatures will probably be most similar to nearby ones due to how heat flows.
\subsection{Similar Methods} \subsection{Similar Methods}
Our research did not find any very similar approaches, especially with 16-bit thermal images. Our research did not find any very similar approaches, especially with 16-bit thermal images.
One paper that comes close is ``Encoding-interleaved hierarchical interpolation for lossless image compression'' \cite{ABRARDO1997321}. There are, however, many papers that may have influenced ours indirectly or come close to ours, and these need to be mentioned for both their similarities and differences.
One paper that is close is ``Encoding-interleaved hierarchical interpolation for lossless image compression'' \cite{ABRARDO1997321}.
This method seems to operate with a similar end goal, to save the interpolation, but operates on a different system, including how it interpolates. This method seems to operate with a similar end goal, to save the interpolation, but operates on a different system, including how it interpolates.
Instead of using neighboring pixels in a raster format, it uses vertical and horizontal ribbons, and a different way of interpolating. Instead of using neighboring pixels in a raster format, it uses vertical and horizontal ribbons, and a different way of interpolating.
The ribbons alternate, going between a row that is just saved and one that is not saved but is later interpolated. The ribbons alternate, going between a row that is just saved and one that is not saved but is later interpolated.
In this way it is filling in the gaps of an already robust image and saving that finer detail. In this way it is filling in the gaps of an already robust image and saving the finer details.
It should show an increase in speed but not in overall compression. This other method could possibly show an increase in speed, but likely not in overall compression.
This will not have the same benefit as ours since ours uses interpolation on almost the entire image, instead of just parts, optimizing over the larger amount of saved error values. This will not have the same benefit as ours since ours uses interpolation on almost the entire image, instead of just parts, optimizing over a larger amount of data.
This paper is also similar to ``Iterative polynomial interpolation and data compression'' \cite{Dahlen1993}, where the researchers took a similar approach but with different shapes. This paper is also similar to ``Iterative polynomial interpolation and data compression'' \cite{Dahlen1993}, where the researchers took a similar approach but with different shapes.
The error numbers were still saved, but they specifically used polynomial interpolation, which we did not see fit to use in ours. The error numbers were still saved, but they specifically used polynomial interpolation, which we did not see fit to use in ours.
The closest method is ``Near-lossless image compression by relaxation-labelled prediction'' \cite{AIAZZI20021619} which has similarity with the general principles of the interpolation and encoding. The closest method is ``Near-lossless image compression by relaxation-labelled prediction'' \cite{AIAZZI20021619} which has similarity with the general principles of the interpolation and encoding.
The algorithm detailed in the paper uses a clustering algorithm of the nearby points to create the interpolation, saving the errors in order to retrieve the original later. The algorithm detailed in the paper uses a clustering algorithm of the nearby points to create the interpolation, saving the errors in order to retrieve the original later.
This method is much more complex, not using a direct interpolation method but instead using a clustering algorithm to find the next point. This method is much more complex, not using a direct interpolation method but instead using a clustering algorithm to find the next point.
This could potentially have an advantage by using more points in the process, but the implementation becomes too complicated and may lose value. This could potentially have an advantage by using more points in the process, but the implementation becomes too complicated and may lose value.
The goal for us was to have a simple and efficient encoding operation, and this would have too many things to process. The goal for us was to have a simple and efficient encoding operation, and this would have too many things to process.
It also has a binning system based off of the mean square prediction error, but which bin it goes into can shift over the classification process adding to the complexity of the algorithm. It also has a binning system like ours, with theirs based off of the mean square prediction error.
The problem is that which bin it goes into can shift over the classification process, adding to the complexity of the algorithm.
The use of more points could have been implemented into ours too but we chose not to due to the potential additional temporal complexity. The use of more points could have been implemented into ours too but we chose not to due to the potential additional temporal complexity.
\section{The Approach} \section{The Approach}
To begin, the border values are encoded into the system starting with the first value. To begin, the border values are encoded into the system starting with the first value.
The values after that are just modifications from the first value. The values after that are just modifications from the first value.
There are not many values here and the algorithm needs a place to start. There are not many values here and the algorithm needs a place to start.
Other things could have been done but they would have raised temporal complexity with marginal gain. Alternate things could have been done but they would have raised temporal complexity with marginal gain.
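As a small hedged sketch of this border handling (one possible reading of ``modifications from the first value''; the function names and the exact delta scheme are assumptions, not the project's code):

def encode_border(border_values):
    """Store the first border pixel outright, then only the differences
    from that first value for the rest of the border."""
    first = int(border_values[0])
    deltas = [int(v) - first for v in border_values[1:]]
    return first, deltas

def decode_border(first, deltas):
    """Rebuild the border from the stored first value and the deltas."""
    return [first] + [first + d for d in deltas]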
Once the middle points are reached, the pixel to the left, top left, directly above, and top right have already been read in. Once the middle points are reached, the pixel to the left, top left, directly above, and top right have already been read in.
Each of these values is given a point in the x-y plane, with the top left at (-1,1), top pixel at (0,1), top right pixel at (1,1), and the middle left pixel at (-1,0), giving the target (0,0). Each of these values is given a point in the x-y plane, with the top left at (-1,1), top pixel at (0,1), top right pixel at (1,1), and the middle left pixel at (-1,0), giving the target (0,0).
Using the formula for a plane in 3D ($ax + by + c = z$) we get the system of equations Using the formula for a plane in 3D ($ax + by + c = z$) we get the system of equations
...@@ -230,7 +230,7 @@ $$ ...@@ -230,7 +230,7 @@ $$
The new matrix is full rank and can therefore be solved using \verb|numpy.linalg.solve| \cite{Numpy}. The new matrix is full rank and can therefore be solved using \verb|numpy.linalg.solve| \cite{Numpy}.
The x that results corresponds to two values followed by the original $c$ from the $ax+by+c=z$ form, which is the predicted pixel value. The x that results corresponds to two values followed by the original $c$ from the $ax+by+c=z$ form, which is the predicted pixel value.
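A minimal sketch of this prediction step, assuming the neighbor ordering given above and that the prediction is rounded before the error is taken (the function names are illustrative, not the repository's actual code):

import numpy as np

# Neighbor coordinates from the text: top left (-1,1), top (0,1),
# top right (1,1), left (-1,0); the unknowns are a, b, c in ax + by + c = z.
A = np.array([[-1, 1, 1],
              [ 0, 1, 1],
              [ 1, 1, 1],
              [-1, 0, 1]], dtype=np.float64)

def predict_pixel(top_left, top, top_right, left):
    """Fit the plane to the four known neighbors and evaluate it at (0, 0),
    i.e. return the coefficient c."""
    z = np.array([top_left, top, top_right, left], dtype=np.float64)
    # Four equations, three unknowns: solve the full-rank normal equations.
    a, b, c = np.linalg.solve(A.T @ A, A.T @ z)
    return c

def prediction_error(original, top_left, top, top_right, left):
    """The value that actually gets encoded for this pixel."""
    return int(original) - int(round(predict_pixel(top_left, top, top_right, left)))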
Huffman encoding performs well on data with varying frequency \cite{Huffman}, which makes saving the error numbers a good candidate for using it. Huffman encoding performs well on data with varying frequency \cite{Huffman}, which makes it a good candidate for saving the error numbers.
Most pixels will be off by low numbers since many objects have close to uniform surface temperature or have an almost uniform temperature gradient. Most pixels will be off by low numbers since many objects have close to uniform surface temperature or have an almost uniform temperature gradient.
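For illustration, a Huffman table over the collected error values can be built with the standard heap-based construction; this is a generic sketch (plain Python, symbols are the integer errors), not the project's encoder.

import heapq
from collections import Counter

def huffman_code(errors):
    """Return an {error value: bitstring} table; frequent (small) errors
    end up with the shortest codes."""
    freq = Counter(errors)
    if len(freq) == 1:                        # degenerate single-symbol case
        return {next(iter(freq)): "0"}
    # Heap entries: (frequency, tie-breaker, [(symbol, partial code), ...])
    heap = [(f, i, [(sym, "")]) for i, (sym, f) in enumerate(freq.items())]
    heapq.heapify(heap)
    tie = len(heap)
    while len(heap) > 1:
        f1, _, left = heapq.heappop(heap)
        f2, _, right = heapq.heappop(heap)
        merged = [(s, "0" + c) for s, c in left] + [(s, "1" + c) for s, c in right]
        heapq.heappush(heap, (f1 + f2, tie, merged))
        tie += 1
    return dict(heap[0][2])

table = huffman_code([0, 0, 0, 1, -1, 0, 2, 0, -1, 0])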
\begin{figure}[h] \begin{figure}[h]
...@@ -260,14 +260,14 @@ An average number between all of them was chosen, since using the average versus ...@@ -260,14 +260,14 @@ An average number between all of them was chosen, since using the average versus
We attained an average compression ratio of $0.4057$ on a set of 262 images, with compression ratios ranging from $0.3685$ to $0.4979$. We attained an average compression ratio of $0.4057$ on a set of 262 images, with compression ratios ranging from $0.3685$ to $0.4979$.
Because the system as it stands runs off of a saved dictionary, it is better to think of the system as a cross between individual compression and a larger archival tool. Because the system as it stands runs off of a saved dictionary, it is better to think of the system as a cross between an individual compression system and a larger archival tool.
This means that there are large changes in compression ratios depending on how many files are compressed at a time, despite the ability to decompress files individually. This means that there are large changes in compression ratios depending on how many files are compressed at a time, despite the ability to decompress files individually.
When the size of the saved dictionary was included, the compression ratio on the entire set only changed from $0.4043$ to $0.4057$. However, when tested on just the first image in the set, it went from $0.3981$ to $0.7508$. When the size of the saved dictionary was included, the compression ratio on the entire set only changed from $0.4043$ to $0.4057$. However, when tested on just the first image in the set, it went from $0.3981$ to $0.7508$.
This is not a permanent issue, as changes to the system can be made to fix this. This is not a permanent issue, as changes to the method can be made to fix this.
These are detailed in the discussion section below. These are detailed in the discussion section below.
We are using it on a set of at least 16 images, so this does not affect us as much. This was tested on a set of at least 16 images, so this does not affect us as much.
When tested on a random set of 16 images, the ratio only changed from $0.3973$ to $0.4193$. When tested on a random set of 16 images, the ratio only changed from $0.3973$ to $0.4193$.
\begin{tabular}{ |p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}| } \begin{tabular}{ |p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}| }
\hline \hline
...@@ -280,30 +280,30 @@ When tested on a random set of 16 images, the ratio only changed from $0.3973$ t ...@@ -280,30 +280,30 @@ When tested on a random set of 16 images, the ratio only changed from $0.3973$ t
\hline \hline
\end{tabular} \end{tabular}
The created file system together created files that are on average 33.7\% smaller than PNG and 34.5\% smaller than LZW compression on TIFF. Our method created files that are on average 33.7\% smaller than PNG and 34.5\% smaller than LZW compression on TIFF.
\section{Discussion} \section{Discussion}
The files produced through this method are much smaller than the others tested but at great computational costs. The files produced through this method are much smaller than the others, but this comes at a great computational cost.
PNG compression is several orders of magnitude faster than the code that was used in this project. PNG compression was several orders of magnitude faster on the local machine than the method that was used in this project.
Using a compiled language instead of Python will increase the speed but there are other improvements that could be made. Using a compiled language instead of Python will increase the speed substantially, but there are other improvements that can be made.
Part of the problem with the speed was the \verb|numpy.linalg.solve| \cite{Numpy} function, which is not the fastest way to solve the system.
This method operates in $O(N^3)$ \cite{LAPACKAlgorithms} for an $N\times N$ matrix, while more recent algorithms have placed it at $O(n^{2.37286})$ \cite{DBLP:journals/corr/abs-2010-05846} The issue with \verb|numpy.linalg.solve| was later addressed to fix the potential slowdown, but calculating the inverse beforehand and using that in the system had marginal temporal benefit.
Using an approximation could be helpful. \verb|numpy.linalg.solve| runs in $O(N^3)$ for an $N\times N$ matrix, while the multiplication runs in a similar time \cite{LAPACKAlgorithms}.
Although it is potentially lossy, it would greatly improve computational complexity. The least squares method mentioned in this project also has a shortcoming, but this one cannot be solved as easily.
The least squares method mentioned in this project also has the same shortcoming. The pseudoinverse can be calculated beforehand, but the largest problem is that it is solving the system for every pixel individually and calculating the norm.
It runs in $O(N^3)$ for a similar $N\times N$ matrix \cite{LeastSquaredProblem}. \verb|numpy.linalg.lstsq| itself runs in $O(N^3)$ for an $N\times N$ matrix \cite{LeastSquaredProblem}, while the pseudoinverse, when implemented, uses more Python runtime, adding to temporal complexity.
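A sketch of the ``calculate the (pseudo)inverse beforehand'' idea being discussed here, with the added assumption that the per-pixel systems are batched so no Python-level loop remains; this illustrates the trade-off, it is not the project's implementation.

import numpy as np

# The 4x3 neighbor matrix is identical for every interior pixel,
# so its pseudoinverse only has to be computed once.
A = np.array([[-1, 1, 1],
              [ 0, 1, 1],
              [ 1, 1, 1],
              [-1, 0, 1]], dtype=np.float64)
A_pinv = np.linalg.pinv(A)                              # 3x4, computed one time

def predicted_interior(img):
    """Predict every interior pixel from its four already-read neighbors
    in one vectorized pass."""
    img = img.astype(np.float64)
    top_left  = img[:-1, :-2]
    top       = img[:-1, 1:-1]
    top_right = img[:-1, 2:]
    left      = img[1:, :-2]
    z = np.stack([top_left, top, top_right, left])       # (4, H-1, W-2)
    coeffs = np.tensordot(A_pinv, z, axes=([1], [0]))    # (3, H-1, W-2)
    return coeffs[2]                                      # the c plane term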
This compression suffers greatly when it is only used on individual images, which is not a problem for the project it was designed for. This compression suffers greatly when it is only used on individual images, which is not a problem for the project it was tested on.
The camera that this compression was built for has 16 image sensors that work simultaneously. The test images came from a camera that has 16 image sensors that work simultaneously.
They work in 100 image increments and therefore create large packets that can be saved together, while still having the functionality of decompressing individually. The camera works in multiple image increments and therefore creates large packets that can be saved together, while still having the functionality of decompressing individually.
This saves greatly on the memory that is required to view an image. This saves greatly on the memory that is required to view an image.
It was therefore not seen as necessary to create a different system to compress individual files, as individual images are not created. It was therefore not seen as necessary to create a different system to compress individual files, as individual images are not created.
A potential workaround for this problem would be to code extraneous values into the image directly instead of adding them to the full dictionary. A potential workaround for this problem would be to code extraneous values into the image directly instead of adding them to the full dictionary.
This has the downside of not being able to integrate perfectly with Huffman encoding. This has the downside of not being able to integrate perfectly with Huffman encoding.
A leaf of the tree would have to be a trigger to not use Huffman encoding anymore and use an alternate system to read in the bits. A leaf of the tree would have to be a trigger to not use Huffman encoding anymore and use an alternate system to read in the bits.
We chose not to do this but it would be a simple operation for someone with a different use case. We did not do this, but it would be a simple change for someone with a different use case.
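One way the escape-leaf idea could look in practice (a hedged sketch; the ESCAPE symbol, the 16-bit literal width, and the bitstring representation are all assumptions for illustration, not part of this project):

ESCAPE = "ESC"   # hypothetical reserved leaf in the Huffman tree

def decode_errors(bits, decode_table, raw_bits=16):
    """Walk the bitstream with the Huffman table; when the ESCAPE leaf is
    reached, read the next raw_bits bits as a literal value instead."""
    values, code, i = [], "", 0
    while i < len(bits):
        code += bits[i]
        i += 1
        if code in decode_table:              # codes are prefix-free
            symbol = decode_table[code]
            if symbol == ESCAPE:
                values.append(int(bits[i:i + raw_bits], 2))
                i += raw_bits
            else:
                values.append(symbol)
            code = ""
    return values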
{\small {\small
\bibliographystyle{ieee} \bibliographystyle{ieee}
......