Commit c2f2988f authored by Bryce Hepner

Bib change

parent 820087f4
Pipeline #2570 passed with stage in 7 seconds
--- a/main.aux
+++ b/main.aux
@@ -33,14 +33,15 @@
 \citation{LZW}
 \citation{PNGdetails}
 \citation{ABRARDO1997321}
+\citation{Dahlen1993}
 \citation{AIAZZI20021619}
 \@writefile{toc}{\contentsline {subsection}{\numberline {2.2}LZW}{2}{subsection.2.2}\protected@file@percent }
 \@writefile{brf}{\backcite{LZW}{{2}{2.2}{subsection.2.2}}}
 \@writefile{brf}{\backcite{PNGdetails}{{2}{2.2}{subsection.2.2}}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}A Method to Save Some of the Interpolated Errors}{2}{subsection.2.3}\protected@file@percent }
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Similar Methods}{2}{subsection.2.3}\protected@file@percent }
 \@writefile{brf}{\backcite{ABRARDO1997321}{{2}{2.3}{subsection.2.3}}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.4}A Method of Interpolation by Clustering}{2}{subsection.2.4}\protected@file@percent }
-\@writefile{brf}{\backcite{AIAZZI20021619}{{2}{2.4}{subsection.2.4}}}
+\@writefile{brf}{\backcite{Dahlen1993}{{2}{2.3}{subsection.2.3}}}
+\@writefile{brf}{\backcite{AIAZZI20021619}{{2}{2.3}{subsection.2.3}}}
 \@writefile{toc}{\contentsline {section}{\numberline {3}The Approach}{2}{section.3}\protected@file@percent }
 \citation{Numpy}
 \citation{Huffman}
@@ -64,11 +65,12 @@
 \bibcite{DBLP:journals/corr/abs-2010-05846}{3}
 \bibcite{LeastSquaredProblem}{4}
 \bibcite{LAPACKAlgorithms}{5}
-\bibcite{PNGdetails}{6}
-\bibcite{Numpy}{7}
-\bibcite{Huffman}{8}
-\bibcite{PNGoverview}{9}
-\bibcite{LZW}{10}
+\bibcite{Dahlen1993}{6}
+\bibcite{PNGdetails}{7}
+\bibcite{Numpy}{8}
+\bibcite{Huffman}{9}
+\bibcite{PNGoverview}{10}
+\bibcite{LZW}{11}
 \@writefile{toc}{\contentsline {section}{\numberline {5}Discussion}{4}{section.5}\protected@file@percent }
 \@writefile{brf}{\backcite{Numpy}{{4}{5}{section.5}}}
 \@writefile{brf}{\backcite{LAPACKAlgorithms}{{4}{5}{section.5}}}
...
--- a/main.bbl
+++ b/main.bbl
@@ -29,6 +29,11 @@ S.~Blackford.
 \newblock \url{http://www.netlib.org/lapack/lug/node71.html}, Oct. 1999.
 \newblock Accessed: 2022-6-23.

+\bibitem{Dahlen1993}
+M.~D{\ae}hlen and M.~Floater.
+\newblock Iterative polynomial interpolation and data compression.
+\newblock {\em Numerical Algorithms}, 5(3):165--177, Mar 1993.
+
 \bibitem{PNGdetails}
 L.~P. Deutsch.
 \newblock {DEFLATE Compressed Data Format Specification version 1.3}.
...
--- a/main.bib
+++ b/main.bib
@@ -155,6 +155,23 @@ abstract = {This paper describes a differential pulse code modulation scheme sui
 }
+
+@Article{Dahlen1993,
+  author={D{\ae}hlen, Morten
+          and Floater, Michael},
+  title={Iterative polynomial interpolation and data compression},
+  journal={Numerical Algorithms},
+  year={1993},
+  month={Mar},
+  day={01},
+  volume={5},
+  number={3},
+  pages={165--177},
+  abstract={In this paper we look at some iterative interpolation schemes and investigate how they may be used in data compression. In particular, we use the pointwise polynomial interpolation method to decompose discrete data into a sequence of difference vectors. By compressing these differences, one can store an approximation to the data within a specified tolerance using a fraction of the original storage space (the larger the tolerance, the smaller the fraction).},
+  issn={1572-9265},
+  doi={10.1007/BF02215679},
+  url={https://doi.org/10.1007/BF02215679}
+}
...
--- a/main.blg
+++ b/main.blg
@@ -3,44 +3,44 @@ Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
 The top-level auxiliary file: main.aux
 The style file: ieee.bst
 Database file #1: main.bib
-You've used 10 entries,
+You've used 11 entries,
 2120 wiz_defined-function locations,
-574 strings with 6177 characters,
-and the built_in function-call counts, 3544 in all, are:
-= -- 324
-> -- 230
+583 strings with 6326 characters,
+and the built_in function-call counts, 3869 in all, are:
+= -- 356
+> -- 243
 < -- 0
-+ -- 95
-- -- 82
-* -- 281
-:= -- 630
-add.period$ -- 33
-call.type$ -- 10
-change.case$ -- 71
++ -- 100
+- -- 86
+* -- 308
+:= -- 684
+add.period$ -- 36
+call.type$ -- 11
+change.case$ -- 76
 chr.to.int$ -- 0
-cite$ -- 10
-duplicate$ -- 96
-empty$ -- 237
-format.name$ -- 82
-if$ -- 701
+cite$ -- 11
+duplicate$ -- 107
+empty$ -- 264
+format.name$ -- 86
+if$ -- 769
 int.to.chr$ -- 0
-int.to.str$ -- 10
-missing$ -- 7
-newline$ -- 56
-num.names$ -- 20
-pop$ -- 69
+int.to.str$ -- 11
+missing$ -- 8
+newline$ -- 61
+num.names$ -- 22
+pop$ -- 72
 preamble$ -- 1
-purify$ -- 61
+purify$ -- 65
 quote$ -- 0
-skip$ -- 70
+skip$ -- 78
 stack$ -- 0
-substring$ -- 165
-swap$ -- 10
+substring$ -- 191
+swap$ -- 11
 text.length$ -- 0
 text.prefix$ -- 0
 top$ -- 0
-type$ -- 40
+type$ -- 44
 warning$ -- 0
-while$ -- 26
-width$ -- 12
-write$ -- 115
+while$ -- 29
+width$ -- 13
+write$ -- 126
--- a/main.brf
+++ b/main.brf
@@ -4,7 +4,8 @@
 \backcite {LZW}{{2}{2.2}{subsection.2.2}}
 \backcite {PNGdetails}{{2}{2.2}{subsection.2.2}}
 \backcite {ABRARDO1997321}{{2}{2.3}{subsection.2.3}}
-\backcite {AIAZZI20021619}{{2}{2.4}{subsection.2.4}}
+\backcite {Dahlen1993}{{2}{2.3}{subsection.2.3}}
+\backcite {AIAZZI20021619}{{2}{2.3}{subsection.2.3}}
 \backcite {Numpy}{{3}{3}{section.3}}
 \backcite {Huffman}{{3}{3}{section.3}}
 \backcite {Numpy}{{3}{3}{figure.caption.3}}
...
--- a/main.log
+++ b/main.log
@@ -1,4 +1,4 @@
-This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) (preloaded format=pdflatex 2020.7.20) 27 JUN 2022 14:56
+This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) (preloaded format=pdflatex 2020.7.20) 27 JUN 2022 15:53
 entering extended mode
 restricted \write18 enabled.
 %&-line parsing enabled.
@@ -404,7 +404,7 @@ LaTeX Font Warning: Font shape `OMS/cmtt/m/n' undefined
 (Font) using `OMS/cmsy/m/n' instead
 (Font) for symbol `textbraceleft' on input line 91.
-<PixelArrangement.png, id=48, 130.55226pt x 86.724pt>
+<PixelArrangement.png, id=44, 130.55226pt x 86.724pt>
 File: PixelArrangement.png Graphic file (type png)
 <use PixelArrangement.png>
 Package pdftex.def Info: PixelArrangement.png used on input line 112.
@@ -412,41 +412,38 @@ Package pdftex.def Info: PixelArrangement.png used on input line 112.
 [1{/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map}
-<./PixelArrangement.png (PNG copy)>]
-Underfull \vbox (badness 1931) has occurred while \output is active []
-[2]
-<Uniform_No_Title.png, id=86, 462.528pt x 346.896pt>
+<./PixelArrangement.png (PNG copy)>] [2]
+<Uniform_No_Title.png, id=84, 462.528pt x 346.896pt>
 File: Uniform_No_Title.png Graphic file (type png)
 <use Uniform_No_Title.png>
-Package pdftex.def Info: Uniform_No_Title.png used on input line 231.
+Package pdftex.def Info: Uniform_No_Title.png used on input line 238.
 (pdftex.def) Requested size: 237.13594pt x 177.8515pt.
-<Normal_No_Title.png, id=88, 462.528pt x 346.896pt>
+<Normal_No_Title.png, id=86, 462.528pt x 346.896pt>
 File: Normal_No_Title.png Graphic file (type png)
 <use Normal_No_Title.png>
-Package pdftex.def Info: Normal_No_Title.png used on input line 237.
+Package pdftex.def Info: Normal_No_Title.png used on input line 244.
 (pdftex.def) Requested size: 237.13594pt x 177.8515pt.
 LaTeX Warning: `h' float specifier changed to `ht'.
-Underfull \hbox (badness 4713) in paragraph at lines 263--275
+Underfull \hbox (badness 4713) in paragraph at lines 270--282
 []\OT1/cmr/m/n/10 We are us-ing it on a set of at least 16
  []
-Underfull \hbox (badness 5161) in paragraph at lines 263--275
+Underfull \hbox (badness 5161) in paragraph at lines 270--282
 \OT1/cmr/m/n/10 im-ages, so this does not af-fect us as much.
  []
-Underfull \hbox (badness 4353) in paragraph at lines 263--275
+Underfull \hbox (badness 4353) in paragraph at lines 270--282
 \OT1/cmr/m/n/10 When tested on a ran-dom set of 16 im-ages,
  []
-Underfull \hbox (badness 3428) in paragraph at lines 263--275
+Underfull \hbox (badness 3428) in paragraph at lines 270--282
 \OT1/cmr/m/n/10 the ra-tio only changed from $0\OML/cmm/m/it/10 :\OT1/cmr/m/n/1
 0 3973$ to $0\OML/cmm/m/it/10 :\OT1/cmr/m/n/10 4193$.
  []
@@ -462,30 +459,30 @@ Underfull \hbox (badness 7362) in paragraph at lines 31--31
  []
 )
-Package atveryend Info: Empty hook `BeforeClearDocument' on input line 307.
+Package atveryend Info: Empty hook `BeforeClearDocument' on input line 314.
 [4]
-Package atveryend Info: Empty hook `AfterLastShipout' on input line 307.
+Package atveryend Info: Empty hook `AfterLastShipout' on input line 314.
 (./main.aux)
-Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 307.
+Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 314.
 \snap@out=\write5
 \openout5 = `main.dep'.
 Dependency list written on main.dep.
-Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 307.
+Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 314.
 Package rerunfilecheck Info: File `main.out' has not changed.
-(rerunfilecheck) Checksum: 678B3FD8ACEC81A3FDC0098488E26BA3;645.
+(rerunfilecheck) Checksum: 90A24BEB086706678095977998C56209;523.
 Package rerunfilecheck Info: File `main.brf' has not changed.
-(rerunfilecheck) Checksum: B884F516D3798BA29F2253029CE65F80;666.
+(rerunfilecheck) Checksum: 88896D3D6F5D891BCB4221968D720CD5;715.
 LaTeX Font Warning: Some font shapes were not available, defaults substituted.
 )
 Here is how much of TeX's memory you used:
- 8440 strings out of 481239
- 129191 string characters out of 5920377
- 402171 words of memory out of 5000000
- 23518 multiletter control sequences out of 15000+600000
+ 8443 strings out of 481239
+ 129230 string characters out of 5920377
+ 402204 words of memory out of 5000000
+ 23521 multiletter control sequences out of 15000+600000
 541812 words of font info for 57 fonts, out of 8000000 for 9000
 1142 hyphenation exceptions out of 8191
 47i,9n,42p,782b,389s stack positions out of 5000i,500n,10000p,200000b,80000s
@@ -504,10 +501,10 @@ amsfonts/cm/cmr7.pfb></usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts
 share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti9.pfb></usr/share/t
 exlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt10.pfb></usr/share/texlive
 /texmf-dist/fonts/type1/public/amsfonts/cm/cmtt9.pfb>
-Output written on main.pdf (4 pages, 255928 bytes).
+Output written on main.pdf (4 pages, 255632 bytes).
 PDF statistics:
- 190 PDF objects out of 1000 (max. 8388607)
- 161 compressed objects within 2 object streams
+ 189 PDF objects out of 1000 (max. 8388607)
+ 160 compressed objects within 2 object streams
 29 named destinations out of 1000 (max. 500000)
- 104 words of extra memory for PDF output out of 10000 (max. 10000000)
+ 96 words of extra memory for PDF output out of 10000 (max. 10000000)
--- a/main.out
+++ b/main.out
@@ -4,8 +4,7 @@
 \BOOKMARK [1][-]{section.2}{Related Work}{}% 4
 \BOOKMARK [2][-]{subsection.2.1}{PNG}{section.2}% 5
 \BOOKMARK [2][-]{subsection.2.2}{LZW}{section.2}% 6
-\BOOKMARK [2][-]{subsection.2.3}{A Method to Save Some of the Interpolated Errors}{section.2}% 7
-\BOOKMARK [2][-]{subsection.2.4}{A Method of Interpolation by Clustering}{section.2}% 8
-\BOOKMARK [1][-]{section.3}{The Approach}{}% 9
-\BOOKMARK [1][-]{section.4}{Results}{}% 10
-\BOOKMARK [1][-]{section.5}{Discussion}{}% 11
+\BOOKMARK [2][-]{subsection.2.3}{Similar Methods}{section.2}% 7
+\BOOKMARK [1][-]{section.3}{The Approach}{}% 8
+\BOOKMARK [1][-]{section.4}{Results}{}% 9
+\BOOKMARK [1][-]{section.5}{Discussion}{}% 10
--- a/main.tex
+++ b/main.tex
@@ -135,8 +135,8 @@ Our algorithm has a similar use of Huffman encoding, but a completely different
 LZ77 seeks patterns between blocks while ours has no block structure and no explicit pattern functionality.
 Ours uses the equivalent block size of 1, and instead of encoding the data it encodes alternate information which is used to compress.
 \subsection{LZW}
-LZW operates differently by created a separate code table that maps every sequence to a code.
-Although this is used for an image, the original paper explains it through text examples which will be done here as well \cite {LZW}.
+LZW operates differently by creating a separate code table that maps every sequence to a code.
+Although this is used for an image, the original paper by Welch \cite{LZW} explains it through text examples, which will be done here as well.
 Instead of looking at each character individually, it looks at variable-length string chains and compresses those.
 Passing through the items to be compressed, if a phrase has already been encountered, it saves the reference to the original phrase along with the next character in sequence.
 In this way, the longer repeated phrases are automatically found and can be compressed to be smaller.
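To make the code-table mechanics concrete, here is a minimal LZW encoder sketch in Python (illustrative only; the function name and byte-level details are assumptions, not code from the paper or \cite{LZW}):

    def lzw_compress(data):
        # The code table starts with every single byte (codes 0-255).
        table = {bytes([i]): i for i in range(256)}
        next_code = 256
        phrase = b""
        codes = []
        for b in data:
            candidate = phrase + bytes([b])
            if candidate in table:
                # The extended phrase is already known: keep growing it.
                phrase = candidate
            else:
                # Emit the code for the longest known phrase, then register
                # the extended phrase so later repeats compress to one code.
                codes.append(table[phrase])
                table[candidate] = next_code
                next_code += 1
                phrase = bytes([b])
        if phrase:
            codes.append(table[phrase])
        return codes

Longer repeated phrases therefore collapse into single codes, which is the behaviour described above.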
@@ -145,27 +145,34 @@ This system also uses blocks like PNG in order to save patterns in the data, but
 Ours, similarly to PNG, only looks at a short portion of the data, which may have an advantage over LZW for images.
 Images generally do not have the same patterns that text does, so it may be advantageous to not use the entire corpus in compressing an image and instead only evaluate it based on nearby objects.
 The blue parts of the sky will be next to other blue parts of the sky, and in the realm of thermal images, objects will probably be most similar to nearby ones in temperature due to how heat flows.
-\subsection{A Method to Save Some of the Interpolated Errors}
-No projects or papers are very similar to the ideas expressed in this paper, especially not for 16 bit thermal images.
+\subsection{Similar Methods}
+Our research did not find any very similar approaches, especially for 16-bit thermal images.
 One paper that comes close is ``Encoding-interleaved hierarchical interpolation for lossless image compression'' \cite{ABRARDO1997321}.
 This method seems to operate with a similar end goal, to save the interpolation, but operates on a different system, including how it interpolates.
 Instead of using neighboring pixels in a raster format, it uses vertical and horizontal ribbons, and a different way of interpolating.
 The ribbons alternate, going between a row that is just saved and one that is not saved but is later interpolated.
 In this way it is filling in the gaps of an already robust image and saving that finer detail.
 It should show an increase in speed but not in overall compression.
-This will not have the same benefit as ours as ours uses interpolation on almost the entire image, instead of just parts, optimizing over the larger amount of saved error values.
-\subsection{A Method of Interpolation by Clustering}
+This will not have the same benefit as ours, since ours uses interpolation on almost the entire image, instead of just parts, optimizing over the larger amount of saved error values.
+
+This paper is also similar to ``Iterative polynomial interpolation and data compression'' \cite{Dahlen1993}, where the researchers took a similar approach but with different shapes.
+The error numbers were still saved, but they specifically used polynomial interpolation, which we did not see fit to use in ours.
 The closest method is ``Near-lossless image compression by relaxation-labelled prediction'' \cite{AIAZZI20021619}, which shares the general principles of the interpolation and encoding.
 The algorithm detailed in the paper uses a clustering algorithm of the nearby points to create the interpolation, saving the errors in order to retrieve the original later.
 This method is much more complex, not using a direct interpolation method but instead using a clustering algorithm to find the next point.
 This could potentially have an advantage by using more points in the process, but the implementation becomes too complicated and may lose value.
+The goal for us was to have a simple and efficient encoding operation, and this method would have too many steps to process.
 It also has a binning system based on the mean square prediction error, but which bin a pixel goes into can shift over the classification process, adding to the complexity of the algorithm.
-The use of more points could be implemented into ours too, although it would not help the temporal complexity.
+The use of more points could have been implemented into ours too, but we chose not to due to the additional temporal complexity it could bring.
 \section{The Approach}
-To begin, the border values are encoded into the system.
-There are not many values here and the algorithm needs a place to start.
+To begin, the border values are encoded into the system, starting with the first value.
+The values after that are stored as modifications of the first value.
+There are not many values here and the algorithm needs a place to start.
+Other approaches could have been used, but they would have raised temporal complexity for marginal gain.
 Once the middle points are reached, the pixel to the left, top left, directly above, and top right have already been read in.
 Each of these values is given a point in the x-y plane, with the top left at (-1,1), the top pixel at (0,1), the top right pixel at (1,1), and the middle left pixel at (-1,0), giving the target (0,0).
 Using the formula for a plane in 3D ($ax + by + c = z$), we get the system of equations
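A minimal sketch of this prediction step, assuming \verb|numpy.linalg.lstsq| as the text describes (the function name and return convention here are ours, not the paper's code):

    import numpy as np

    # Neighbour offsets (x, y) from the target pixel at (0, 0), with a
    # column of ones for the constant term c in the plane z = ax + by + c.
    COORDS = np.array([[-1.0, 1.0, 1.0],   # top left
                       [ 0.0, 1.0, 1.0],   # directly above
                       [ 1.0, 1.0, 1.0],   # top right
                       [-1.0, 0.0, 1.0]])  # left

    def predict_pixel(top_left, top, top_right, left):
        z = np.array([top_left, top, top_right, left], dtype=float)
        # Least-squares fit of the plane z = ax + by + c to the 4 points.
        (a, b, c), residual, _, _ = np.linalg.lstsq(COORDS, z, rcond=None)
        # Evaluated at the target (0, 0), the plane reduces to z = c.
        return c, residual[0]

The returned residual is the quantity the bin system described below relies on.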
@@ -240,7 +247,7 @@ Most pixels will be off by low numbers since many objects have close to uniform
 \end{figure}

-In order to control for objects in images that are known to have an unpredictable temperature (fail the cases before), a bin system is used.
+In order to adjust for objects in images that are known to have an unpredictable temperature (those that fail the cases before), a bin system is used.
 The residuals from \verb|numpy.linalg.lstsq| \cite{Numpy} are used to determine the difference across the 4 known points, which is then used to place the pixel in a category.
 This number is the error from trying to fit a plane across the 4 different points.
 If a plane can be drawn that contains all 4 points, the error will be much smaller than if the best-fit plane is not close to any of the points.
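A rough sketch of such residual-based binning (the bin edges here are placeholders; the paper's actual thresholds are not given in this excerpt):

    import numpy as np

    # Assumed, illustrative bin edges on the least-squares residual.
    BIN_EDGES = np.array([10.0, 100.0, 1000.0])

    def choose_bin(residual):
        # A large residual means the 4 neighbours are far from coplanar,
        # so the prediction error is likely to be larger as well.
        return int(np.searchsorted(BIN_EDGES, residual))

With these placeholder edges, a residual of 5.0 falls in bin 0 and a residual of 50.0 in bin 1, so well-predicted and poorly predicted pixels are encoded separately.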
@@ -253,8 +260,8 @@ An average number between all of them was chosen, since using the average versus

 We attained an average compression ratio of $0.4057$ on a set of 262 images, with compression ratios ranging from $0.3685$ to $0.4979$.
-Because the system as it stands runs off of a saved dictionary, it is better to think of the system as a cross between individual compression and a larger archive tool.
-This means that there are large changes in compression ratios depending on how many files are compressed, despite the ability to decompress files individually.
+Because the system as it stands runs off of a saved dictionary, it is better to think of the system as a cross between individual compression and a larger archival tool.
+This means that there are large changes in compression ratios depending on how many files are compressed at a time, despite the ability to decompress files individually.
 When the size of the saved dictionary was included, the compression ratio on the entire set only changed from $0.4043$ to $0.4057$. However, when tested on just the first image in the set, it went from $0.3981$ to $0.7508$.
 This is not a permanent issue, as changes to the system can be made to fix this.
...