Commit 596da333 authored by Bryce Hepner's avatar Bryce Hepner

deleted many files and testing code

parent fe1a66d3
...@@ -595,7 +595,7 @@
   ],
   "metadata": {
    "kernelspec": {
-    "display_name": "Python 3 (ipykernel)",
+    "display_name": "Python 3.8.10 64-bit",
     "language": "python",
     "name": "python3"
    },
...@@ -610,6 +610,11 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.8.10"
+   },
+   "vscode": {
+    "interpreter": {
+     "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
+    }
   }
  },
  "nbformat": 4,
......
...@@ -172,10 +172,12 @@ def create_testable_images(images, selected_channel, quantity_of_images):
    # average_image = Image.fromarray(average_image)
    sftp_client.close()
def save_new_gauss():
    """\
    creates gaussian kernel with side length `l` and a sigma of `sig`
    """
    # x,y = np.mgrid[-1:1:.003125, -1:1:.003125]
    x,y = np.mgrid[-1:1:.44, -1:1:.44]
    print(x.shape)
......
{
"cells": [
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from matplotlib import pyplot as plt\n",
"from itertools import product\n",
"import os\n",
"import sys\n",
"from PIL import Image\n",
"from scipy.optimize import minimize,linprog\n",
"from sklearn.neighbors import KernelDensity\n",
"from collections import Counter\n",
"import numpy.linalg as la"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [],
"source": [
"def file_extractor(dirname=\"images\"):\n",
" files = os.listdir(dirname)\n",
" scenes = []\n",
" for file in files:\n",
" if file == '.DS_Store':\n",
" continue\n",
" else:\n",
" scenes.append(os.path.join(dirname, file))\n",
" return scenes\n",
"\n",
"def image_extractor(scenes):\n",
" image_folder = []\n",
" for scene in scenes:\n",
" files = os.listdir(scene)\n",
" for file in files:\n",
" if file[-5:] != \".tiff\" or file[-7:] == \"_6.tiff\":\n",
" continue\n",
" else:\n",
" image_folder.append(os.path.join(scene, file))\n",
" return image_folder #returns a list of file paths to .tiff files in the specified directory given in file_extractor\n",
"\n",
"def im_distribution(images, num):\n",
" \"\"\"\n",
" Function that extracts tiff files from specific cameras and returns a list of all\n",
" the tiff files corresponding to that camera. i.e. all pictures labeled \"_7.tiff\" or otherwise\n",
" specified camera numbers.\n",
" \n",
" Parameters:\n",
" images (list): list of all tiff files, regardless of classification. This is NOT a list of directories but\n",
" of specific tiff files that can be opened right away. This is the list that we iterate through and \n",
" divide.\n",
" \n",
" num (str): a string designation for the camera number that we want to extract i.e. \"14\" for double digits\n",
" of \"_1\" for single digits.\n",
" \n",
" Returns:\n",
" tiff (list): A list of tiff files that have the specified designation from num. They are the files extracted\n",
" from the 'images' list that correspond to the given num.\n",
" \"\"\"\n",
" tiff = []\n",
" for im in images:\n",
" if im[-7:-5] == num:\n",
" tiff.append(im)\n",
" return tiff"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [],
"source": [
"def predict_pix(tiff_image_path, difference = True):\n",
" \"\"\"\n",
" This function predict the pixel values excluding the boundary.\n",
" Using the 4 neighbor pixel values and MSE to predict the next pixel value\n",
" (-1,1) (0,1) (1,1) => relative position of the 4 other given values\n",
" (-1,0) (0,0) => (0,0) is the one we want to predict\n",
" take the derivative of mean square error to solve for the system of equation \n",
" A = np.array([[3,0,-1],[0,3,3],[1,-3,-4]])\n",
" A @ [a, b, c] = [-z0+z2-z3, z0+z1+z2, -z0-z1-z2-z3] where z0 = (-1,1), z1 = (0,1), z2 = (1,1), z3 = (-1,0)\n",
" and the predicted pixel value is c.\n",
" \n",
" Input:\n",
" tiff_image_path (string): path to the tiff file\n",
" \n",
" Return:\n",
" image ndarray(512 X 640): original image \n",
" predict ndarray(325380,): predicted image excluding the boundary\n",
" diff. ndarray(325380,): IF difference = TRUE, difference between the min and max of four neighbors exclude the boundary\n",
" ELSE: the residuals of the four nearest pixels to a fitted hyperplane\n",
" error ndarray(325380,): difference between the original image and predicted image\n",
" A ndarray(3 X 3): system of equation\n",
" \"\"\"\n",
" image_obj = Image.open(tiff_image_path) #Open the image and read it as an Image object\n",
" image_array = np.array(image_obj)[1:,:].astype(int) #Convert to an array, leaving out the first row because the first row is just housekeeping data\n",
" # image_array = image_array.astype(int) \n",
" A = np.array([[3,0,-1],[0,3,3],[1,-3,-4]]) # the matrix for system of equation\n",
" # where z0 = (-1,1), z1 = (0,1), z2 = (1,1), z3 = (-1,0)\n",
" z0 = image_array[0:-2,0:-2] # get all the first pixel for the entire image\n",
" z1 = image_array[0:-2,1:-1] # get all the second pixel for the entire image\n",
" z2 = image_array[0:-2,2::] # get all the third pixel for the entire image\n",
" z3 = image_array[1:-1,0:-2] # get all the forth pixel for the entire image\n",
" \n",
" # calculate the out put of the system of equation\n",
" y0 = np.ravel(-z0+z2-z3)\n",
" y1 = np.ravel(z0+z1+z2)\n",
" y2 = np.ravel(-z0-z1-z2-z3)\n",
" y = np.vstack((y0,y1,y2))\n",
" \n",
" # use numpy solver to solve the system of equations all at once\n",
" #predict = np.floor(np.linalg.solve(A,y)[-1])\n",
" predict = np.round(np.round((np.linalg.solve(A,y)[-1]),1))\n",
" \n",
" #Matrix system of points that will be used to solve the least squares fitting hyperplane\n",
" points = np.array([[-1,-1,1], [-1,0,1], [-1,1,1], [0,-1,1]])\n",
" \n",
" # flatten the neighbor pixlels and stack them together\n",
" z0 = np.ravel(z0)\n",
" z1 = np.ravel(z1)\n",
" z2 = np.ravel(z2)\n",
" z3 = np.ravel(z3)\n",
" neighbor = np.vstack((z0,z1,z2,z3)).T\n",
" \n",
" if difference:\n",
" # calculate the difference\n",
" diff = np.max(neighbor,axis = 1) - np.min(neighbor, axis=1)\n",
" \n",
" else:\n",
" #Compute the best fitting hyperplane using least squares\n",
" #The res is the residuals of the four points used to fit the hyperplane (summed distance of each of the \n",
" #points to the hyperplane), it is a measure of gradient\n",
" f, diff, rank, s = la.lstsq(points, neighbor.T, rcond=None)\n",
" diff = diff.astype(int)\n",
" \n",
" # calculate the error\n",
" error = np.ravel(image_array[1:-1,1:-1])-predict\n",
" \n",
" return image_array, diff, error"
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [],
"source": [
"\"\"\"\n",
"this huffman encoding code is found online\n",
"https://favtutor.com/blogs/huffman-coding\n",
"\"\"\"\n",
"\n",
"class NodeTree(object):\n",
" def __init__(self, left=None, right=None):\n",
" self.left = left\n",
" self.right = right\n",
"\n",
" def children(self):\n",
" return self.left, self.right\n",
"\n",
" def __str__(self):\n",
" return self.left, self.right\n",
"\n",
"\n",
"def huffman_code_tree(node, binString=''):\n",
" '''\n",
" Function to find Huffman Code\n",
" '''\n",
" if type(node) is str:\n",
" return {node: binString}\n",
" (l, r) = node.children()\n",
" d = dict()\n",
" d.update(huffman_code_tree(l, binString + '0'))\n",
" d.update(huffman_code_tree(r, binString + '1'))\n",
" return d\n",
"\n",
"\n",
"def make_tree(nodes):\n",
" '''\n",
" Function to make tree\n",
" :param nodes: Nodes\n",
" :return: Root of the tree\n",
" '''\n",
" while len(nodes) > 1:\n",
" (key1, c1) = nodes[-1]\n",
" (key2, c2) = nodes[-2]\n",
" nodes = nodes[:-2]\n",
" node = NodeTree(key1, key2)\n",
" nodes.append((node, c1 + c2))\n",
" #reverse True, decending order\n",
" nodes = sorted(nodes, key=lambda x: x[1], reverse=True)\n",
" return nodes[0][0]\n",
"def decode_string(huffman_string, the_keys, the_values):\n",
" for i in range(len(huffman_string)):\n",
" try:\n",
" return (int(the_keys[the_values.index(huffman_string[:i+1])]),huffman_string[i+1:])\n",
" except:\n",
" pass\n"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [],
"source": [
"def make_dictionary(tiff_image_path_list, num_bins=4, difference = True):\n",
" \"\"\"\n",
" This function is used to encode the error based on the difference\n",
" and split the difference into different bins\n",
" \n",
" Input:\n",
" tiff_image_path (string): path to the tiff file\n",
" num_bins (int): number of bins\n",
" \n",
" Return:\n",
" huffman_encoding_list list (num_bins + 1): a list of dictionary\n",
" image_array ndarray (512, 640): original image\n",
" new_error ndarray (512, 640): error that includes the boundary\n",
" diff ndarray (510, 638): difference of min and max of the 4 neighbors\n",
" boundary ndarray (2300,): the boundary values after subtracting the very first pixel value\n",
" predict ndarray (325380,): the list of predicted values\n",
" bins list (num_bins - 1,): a list of threshold to cut the bins\n",
" A ndarray (3 X 3): system of equation\n",
" \n",
" \"\"\"\n",
" list_of_all_vals = []\n",
" huffman_encoding_list = []\n",
" for _ in range(num_bins+1):\n",
" list_of_all_vals.append([])\n",
" for _, tiff_image_path in enumerate(tiff_image_path_list):\n",
" # get the image_array, etc\n",
" image_array, diff, error= predict_pix(tiff_image_path, difference)\n",
"\n",
" bins = [30,70,141]\n",
" # get the boundary \n",
" boundary = np.hstack((image_array[0,:],image_array[-1,:],image_array[1:-1,0],image_array[1:-1,-1]))\n",
" \n",
" # take the difference of the boundary with the very first pixel\n",
" boundary = boundary - image_array[0,0]\n",
" \n",
" #boundary is 1dim, so boundary[0] is just the first element\n",
" boundary[0] = image_array[0,0]\n",
" \n",
" # huffman encode the boundary\n",
" for j in boundary:\n",
" list_of_all_vals[0].append(str(j))\n",
" \n",
" # create a list of huffman table\n",
" n = len(bins)\n",
" \n",
" # loop through different bins\n",
" for k in range (0,n):\n",
" # the first bin\n",
" if k == 0 :\n",
" # get the point within the bin and huffman huffman_encoding_dict\n",
" mask = diff <= bins[k]\n",
" for j in error[mask].astype(int):\n",
" list_of_all_vals[k+1].append(str(j))\n",
"\n",
" \n",
" # the middle bins\n",
" else:\n",
" # get the point within the bin and huffman huffman_encoding_dict\n",
" mask = diff > bins[k-1]\n",
" new_error = error[mask]\n",
" mask2 = diff[mask] <= bins[k]\n",
" for j in new_error[mask2].astype(int):\n",
" list_of_all_vals[k+1].append(str(j))\n",
"\n",
" \n",
" # the last bin \n",
" # get the point within the bin and huffman huffman_encoding_dict\n",
" mask = diff > bins[-1]\n",
" for j in error[mask].astype(int):\n",
" list_of_all_vals[-1].append(str(j))\n",
" for item in list_of_all_vals:\n",
" freq = dict(Counter(item))\n",
" freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n",
" node = make_tree(freq)\n",
" huffman_encoding_list.append(huffman_code_tree(node))\n",
" # create a error matrix that includes the boundary (used in encoding matrix)\n",
" new_error = np.copy(image_array)\n",
" new_error[1:-1,1:-1] = np.reshape(error,(510, 638))\n",
" keep = new_error[0,0]\n",
" new_error[0,:] = new_error[0,:] - keep\n",
" new_error[-1,:] = new_error[-1,:] - keep\n",
" new_error[1:-1,0] = new_error[1:-1,0] - keep\n",
" new_error[1:-1,-1] = new_error[1:-1,-1] - keep\n",
" new_error[0,0] = keep\n",
" # huffman_encoding_list = list(set(huffman_encoding_list))\n",
" diff = np.reshape(diff,(510,638))\n",
" # return the huffman dictionary\n",
" return huffman_encoding_list,bins\n"
]
},
{
"cell_type": "code",
"execution_count": 56,
"metadata": {},
"outputs": [],
"source": [
"def huffman(tiff_image_path, num_bins=4, difference = True):\n",
" \"\"\"\n",
" This function is used to encode the error based on the difference\n",
" and split the difference into different bins\n",
" \n",
" Input:\n",
" tiff_image_path (string): path to the tiff file\n",
" num_bins (int): number of bins\n",
" \n",
" Return:\n",
" huffman_encoding_list list (num_bins + 1): a list of dictionary\n",
" image_as_array ndarray (512, 640): original image\n",
" new_error ndarray (512, 640): error that includes the boundary\n",
" diff ndarray (510, 638): difference of min and max of the 4 neighbors\n",
" boundary ndarray (2300,): the boundary values after subtracting the very first pixel value\n",
" predict ndarray (325380,): the list of predicted values\n",
" bins list (num_bins - 1,): a list of threshold to cut the bins\n",
" A ndarray (3 X 3): system of equation\n",
" \"\"\"\n",
" # get the image_as_array, etc\n",
" image_as_array, diff, error= predict_pix(tiff_image_path, difference)\n",
" \n",
" # calculate the number of points that will go in each bin\n",
"\n",
"\n",
" # sort the difference and create the bins\n",
" bins = [30,70,141]\n",
" # get the boundary \n",
" boundary = np.hstack((image_as_array[0,:],image_as_array[-1,:],image_as_array[1:-1,0],image_as_array[1:-1,-1]))\n",
" \n",
" # take the difference of the boundary with the very first pixel\n",
" boundary = boundary - image_as_array[0,0]\n",
" \n",
" #boundary is 1dim, so boundary[0] is just the first element\n",
" boundary[0] = image_as_array[0,0]\n",
" \n",
" # huffman encode the boundary\n",
" bound_vals_as_string = [str(i) for i in boundary]\n",
" freq = dict(Counter(bound_vals_as_string))\n",
" freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n",
" node = make_tree(freq)\n",
" huffman_encoding_dict = huffman_code_tree(node)\n",
" \n",
" # create a list of huffman table\n",
" huffman_encoding_list = [huffman_encoding_dict]\n",
" n = len(bins)\n",
" \n",
" # loop through different bins\n",
" for i in range (0,n):\n",
" # the first bin\n",
" if i == 0 :\n",
" # get the point within the bin and huffman huffman_encoding_dict\n",
" mask = diff <= bins[i]\n",
" line_as_string = [str(i) for i in error[mask].astype(int)]\n",
" freq = dict(Counter(line_as_string))\n",
" freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n",
" node = make_tree(freq)\n",
" huffman_encoding_dict = huffman_code_tree(node)\n",
" huffman_encoding_list.append(huffman_encoding_dict)\n",
" \n",
" # the middle bins\n",
" else:\n",
" # get the point within the bin and huffman huffman_encoding_dict\n",
" mask = diff > bins[i-1]\n",
" new_error = error[mask]\n",
" mask2 = diff[mask] <= bins[i]\n",
" line_as_string = [str(i) for i in new_error[mask2].astype(int)]\n",
" freq = dict(Counter(line_as_string))\n",
" freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n",
" node = make_tree(freq)\n",
" huffman_encoding_dict = huffman_code_tree(node)\n",
" huffman_encoding_list.append(huffman_encoding_dict)\n",
" \n",
" # the last bin \n",
" # get the point within the bin and huffman huffman_encoding_dict\n",
" mask = diff > bins[-1]\n",
" line_as_string = [str(i) for i in error[mask].astype(int)]\n",
" freq = dict(Counter(line_as_string))\n",
" freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)\n",
" node = make_tree(freq)\n",
" huffman_encoding_dict = huffman_code_tree(node)\n",
" huffman_encoding_list.append(huffman_encoding_dict)\n",
"\n",
" # create a error matrix that includes the boundary (used in encoding matrix)\n",
" new_error = np.copy(image_as_array)\n",
" new_error[1:-1,1:-1] = np.reshape(error,(510, 638))\n",
" keep = new_error[0,0]\n",
" new_error[0,:] = new_error[0,:] - keep\n",
" new_error[-1,:] = new_error[-1,:] - keep\n",
" new_error[1:-1,0] = new_error[1:-1,0] - keep\n",
" new_error[1:-1,-1] = new_error[1:-1,-1] - keep\n",
" new_error[0,0] = keep\n",
" # huffman_encoding_list = list(set(huffman_encoding_list))\n",
" diff = np.reshape(diff,(510,638))\n",
"\n",
" return image_as_array, new_error, diff\n"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [],
"source": [
"def encoder(error, list_dic, diff, bins):\n",
" \"\"\"\n",
" This function encode the matrix with huffman coding tables\n",
" \n",
" Input:\n",
" error (512, 640): a matrix with all the errors\n",
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" encoded (512, 640): encoded matrix\n",
" \"\"\"\n",
" returnable_encode = \"\"\n",
" # copy the error matrix (including the boundary)\n",
" encoded = np.copy(error).astype(int).astype(str).astype(object)\n",
" #diff = np.reshape(diff,(510,638))\n",
" # loop through all the pixel to encode\n",
" for i in range(encoded.shape[0]):\n",
" for j in range(encoded.shape[1]):\n",
" if i == 0 or i == encoded.shape[0]-1 or j == 0 or j == encoded.shape[1]-1:\n",
" returnable_encode += list_dic[0][encoded[i][j]]\n",
" elif diff[i-1][j-1] <= bins[0]:\n",
" returnable_encode += list_dic[1][encoded[i][j]]\n",
" elif diff[i-1][j-1] <= bins[1] and diff[i-1][j-1] > bins[0]:\n",
" returnable_encode +=list_dic[2][encoded[i][j]]\n",
" elif diff[i-1][j-1] <= bins[2] and diff[i-1][j-1] > bins[1]:\n",
" returnable_encode +=list_dic[3][encoded[i][j]]\n",
" else:\n",
" returnable_encode += list_dic[4][encoded[i][j]]\n",
" return returnable_encode"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [],
"source": [
"# def bitstring_to_bytes(s):\n",
"# v = int(s, 2)\n",
"# b = bytearray()\n",
"# while v:\n",
"# b.append(v & 0xff)\n",
"# v >>= 8\n",
"# return bytes(b[::-1])\n",
"def bitstring_to_bytes(input_string):\n",
" int_array = []\n",
" length_of_string = len(input_string)\n",
" while length_of_string >= 8:\n",
" int_array.append(int(input_string[:8],2))\n",
" input_string = input_string[8:]\n",
" length_of_string = len(input_string)\n",
" if length_of_string > 0:\n",
" zerobuffer = \"\"\n",
" for _ in range(8-length_of_string):\n",
" zerobuffer += \"0\"\n",
" int_array.append(int(input_string+zerobuffer,2))\n",
" return bytes(int_array)\n"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [],
"source": [
"def decoder(encoded_string, list_dic, bins, use_diff):\n",
" \"\"\"\n",
" This function decodes the encoded_matrix.\n",
" Input:\n",
" A (3 X 3): system of equation\n",
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" encoded_matrix (512, 640): encoded matrix\n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" decode_matrix (512, 640): decoded matrix\n",
" \"\"\"\n",
" A = np.array([[3,0,-1],[0,3,3],[1,-3,-4]]) # the matrix for system of equation\n",
" # change the dictionary back to list\n",
" # !!!!!WARNING!!!! has to change this part, everytime you change the number of bins\n",
" the_keys0 = list(list_dic[0].keys())\n",
" the_values0 = list(list_dic[0].values())\n",
" \n",
" the_keys1 = list(list_dic[1].keys())\n",
" the_values1 = list(list_dic[1].values())\n",
" \n",
" the_keys2 = list(list_dic[2].keys())\n",
" the_values2 = list(list_dic[2].values())\n",
" \n",
" the_keys3 = list(list_dic[3].keys())\n",
" the_values3 = list(list_dic[3].values())\n",
" \n",
" the_keys4 = list(list_dic[4].keys())\n",
" the_values4 = list(list_dic[4].values())\n",
" \n",
" #Matrix system of points that will be used to solve the least squares fitting hyperplane\n",
" points = np.array([[-1,-1,1], [-1,0,1], [-1,1,1], [0,-1,1]])\n",
" \n",
" decode_matrix = np.zeros((512,640))\n",
" # loop through all the element in the matrix\n",
" for i in range(decode_matrix.shape[0]):\n",
" for j in range(decode_matrix.shape[1]):\n",
" # if it's the very first pixel on the image\n",
" if i == 0 and j == 0:\n",
" colorvalue, encoded_string = decode_string(encoded_string,the_keys=the_keys0, the_values=the_values0)\n",
" decode_matrix[i][j] = colorvalue\n",
" \n",
" # if it's on the boundary (any of the 4 edges)\n",
" elif i == 0 or i == decode_matrix.shape[0]-1 or j == 0 or j == decode_matrix.shape[1]-1:\n",
" colorvalue, encoded_string = decode_string(encoded_string,the_keys=the_keys0, the_values=the_values0)\n",
" decode_matrix[i][j] = colorvalue + decode_matrix[0][0]\n",
" # if not the boundary\n",
" else:\n",
" # predict the image with the known pixel value\n",
" z0 = decode_matrix[i-1][j-1]\n",
" z1 = decode_matrix[i-1][j]\n",
" z2 = decode_matrix[i-1][j+1]\n",
" z3 = decode_matrix[i][j-1]\n",
" y0 = int(-z0+z2-z3)\n",
" y1 = int(z0+z1+z2)\n",
" y2 = int(-z0-z1-z2-z3)\n",
" y = np.vstack((y0,y1,y2))\n",
" if use_diff:\n",
" difference = max(z0,z1,z2,z3) - min(z0,z1,z2,z3)\n",
" else:\n",
" \n",
" f, difference, rank, s = la.lstsq(points, [z0,z1,z2,z3], rcond=None) \n",
" difference = difference.astype(int)\n",
" \n",
" predict = np.round(np.round(np.linalg.solve(A,y)[-1][0],1))\n",
" # add on the difference by searching the dictionary\n",
" # !!!!!WARNING!!!! has to change this part, eveytime you change the number of bins\n",
" if difference <= bins[0]:\n",
" colorvalue, encoded_string = decode_string(encoded_string,the_keys=the_keys1, the_values=the_values1)\n",
" decode_matrix[i][j] = colorvalue + int(predict)\n",
" elif difference <= bins[1] and difference > bins[0]:\n",
" colorvalue, encoded_string = decode_string(encoded_string,the_keys=the_keys2, the_values=the_values2)\n",
" decode_matrix[i][j] = colorvalue + int(predict)\n",
" elif difference <= bins[2] and difference > bins[1]:\n",
" colorvalue, encoded_string = decode_string(encoded_string,the_keys=the_keys3, the_values=the_values3)\n",
" decode_matrix[i][j] = colorvalue + int(predict)\n",
" else:\n",
" colorvalue, encoded_string = decode_string(encoded_string,the_keys=the_keys4, the_values=the_values4)\n",
" decode_matrix[i][j] = colorvalue + int(predict)\n",
"\n",
" return decode_matrix.astype(int)"
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {},
"outputs": [],
"source": [
"def read_from_file(filename):\n",
" with open(filename, 'rb') as file:\n",
" return file.read()"
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {},
"outputs": [],
"source": [
"scenes = file_extractor()\n",
"newnamesforlater = []\n",
"images = image_extractor(scenes)\n",
"list_dic, ogbins = make_dictionary(images, 4, False)\n",
"file_size_ratios = []\n",
"np.save(\"first_dic.npy\", list_dic)\n",
"for i in range(125,126):\n",
" image, new_error, diff = huffman(images[i], 4, False)\n",
" encoded_string1 = encoder(new_error, list_dic, diff, ogbins)\n",
" # reconstruct_image = decoder(A, encoded_string, list_dic, bins, False)\n",
" # print(np.allclose(image, reconstruct_image))\n",
" inletters = bitstring_to_bytes(encoded_string1)\n",
" if images[i][:-5] == \".tiff\":\n",
" newname = images[i][:-5]\n",
" else:\n",
" newname = images[i][:-4]\n",
" newnamesforlater.append(newname + \"_Compressed.txt\")\n",
" with open(newname + \"_Compressed.txt\", 'wb') as f:\n",
" f.write(inletters)\n",
" file_size_ratios.append((os.path.getsize(newname + \"_Compressed.txt\"))/os.path.getsize('images/1626032610_393963/1626032610_393963_0.tiff'))"
]
},
{
"cell_type": "code",
"execution_count": 62,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.4946477235897303\n"
]
}
],
"source": [
"print(np.mean(file_size_ratios))"
]
},
{
"cell_type": "code",
"execution_count": 63,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.39535481750525336"
]
},
"execution_count": 63,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"0.39535481750525336"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Tomorrow note: Make new thing so that it rounds better, can be used losslessly while maintaining the same dict. Currently: wildly off."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"info@elphel.com\n",
"801.599.6216\n",
"pdf4eclipse\n",
"Don't use 6, the channel is broken"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"True\n"
]
}
],
"source": [
"def bytes_to_bitstring(input_bytearray):\n",
" end_string = \"\"\n",
" int_array = [i for i in input_bytearray]\n",
" for i, item in enumerate(int_array):\n",
" end_string += (bin(item)[2:].zfill(8))\n",
" return end_string\n",
"list_dic = np.load(\"first_dic.npy\", allow_pickle=\"TRUE\")\n",
"\n",
"\n",
"# ogbins = [12,60,180]\n",
"ogbins = [30,70,141]\n",
"for i,item in enumerate(newnamesforlater):\n",
" image, new_error, diff = huffman(images[125+i], 4, False)\n",
" encoded_string2 = bytes_to_bitstring(read_from_file(item))\n",
" reconstruct_image = decoder(encoded_string2, list_dic, ogbins, False)\n",
" print(np.allclose(image, reconstruct_image))"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [],
"source": [
"def check_bin_size(tiff_image_path_list, num_bins=4, difference = True):\n",
" \"\"\"\n",
" This function is used to encode the error based on the difference\n",
" and split the difference into different bins\n",
" \n",
" Input:\n",
" tiff_image_path (string): path to the tiff file\n",
" num_bins (int): number of bins\n",
" \n",
" Return:\n",
" huffman_encoding_list list (num_bins + 1): a list of dictionary\n",
" image_array ndarray (512, 640): original image\n",
" new_error ndarray (512, 640): error that includes the boundary\n",
" diff ndarray (510, 638): difference of min and max of the 4 neighbors\n",
" boundary ndarray (2300,): the boundary values after subtracting the very first pixel value\n",
" predict ndarray (325380,): the list of predicted values\n",
" bins list (num_bins - 1,): a list of threshold to cut the bins\n",
" A ndarray (3 X 3): system of equation\n",
" \n",
" \"\"\"\n",
" all_bins = []\n",
" for i, tiff_image_path in enumerate(tiff_image_path_list):\n",
" # get the image_array, etc\n",
" image_array, predict, diff, error= predict_pix(tiff_image_path, difference)\n",
" \n",
" # calculate the number of points that will go in each bin\n",
" data_points_per_bin = diff.size // num_bins\n",
"\n",
" # sort the difference and create the bins\n",
" sorted_diff = np.sort(diff.copy())\n",
" bins = [sorted_diff[i*data_points_per_bin] for i in range(1,num_bins)]\n",
" all_bins.append(bins)\n",
" return np.mean(all_bins,axis = 0), np.min(all_bins,axis = 0), np.max(all_bins,axis=0)\n"
]
},
{
"cell_type": "code",
"execution_count": 70,
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "not enough values to unpack (expected 4, got 3)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m/home/bryce/git/master/SameTableEncoder.ipynb Cell 19'\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000019?line=0'>1</a>\u001b[0m \u001b[39mprint\u001b[39m(check_bin_size(images))\n",
"\u001b[1;32m/home/bryce/git/master/SameTableEncoder.ipynb Cell 18'\u001b[0m in \u001b[0;36mcheck_bin_size\u001b[0;34m(tiff_image_path_list, num_bins, difference)\u001b[0m\n\u001b[1;32m <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000017?line=20'>21</a>\u001b[0m all_bins \u001b[39m=\u001b[39m []\n\u001b[1;32m <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000017?line=21'>22</a>\u001b[0m \u001b[39mfor\u001b[39;00m i, tiff_image_path \u001b[39min\u001b[39;00m \u001b[39menumerate\u001b[39m(tiff_image_path_list):\n\u001b[1;32m <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000017?line=22'>23</a>\u001b[0m \u001b[39m# get the image_array, etc\u001b[39;00m\n\u001b[0;32m---> <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000017?line=23'>24</a>\u001b[0m image_array, predict, diff, error\u001b[39m=\u001b[39m predict_pix(tiff_image_path, difference)\n\u001b[1;32m <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000017?line=25'>26</a>\u001b[0m \u001b[39m# calculate the number of points that will go in each bin\u001b[39;00m\n\u001b[1;32m <a href='vscode-notebook-cell:/home/bryce/git/master/SameTableEncoder.ipynb#ch0000017?line=26'>27</a>\u001b[0m data_points_per_bin \u001b[39m=\u001b[39m diff\u001b[39m.\u001b[39msize \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m num_bins\n",
"\u001b[0;31mValueError\u001b[0m: not enough values to unpack (expected 4, got 3)"
]
}
],
"source": [
"print(check_bin_size(images))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"interpreter": {
"hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
},
"kernelspec": {
"display_name": "Python 3.8.10 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
...@@ -515,7 +515,7 @@ def text_to_tiff(filename, list_dic, bins):
if __name__ == "__main__":
-    scenes = file_extractor("averaged_images(11)")
+    scenes = file_extractor("images")
    images = image_extractor(scenes)
    newnamesforlater = []
    list_dic, bins = make_dictionary(images, 4, False)
...@@ -534,16 +534,13 @@ if __name__ == "__main__":
            newname = images[i][:-5]
        else:
            newname = images[i][:-4]
        # print(newname)
        newnamesforlater.append(newname + "_Compressed.txt")
        with open(newname + "_Compressed.txt", 'wb') as f:
            f.write(inletters)
        file_sizes_new.append((os.path.getsize(newname + "_Compressed.txt")))
        file_sizes_old.append((os.path.getsize(images[i])))
        # sleep(5)
        # if i % 50 == 0:
        #     print(i)
        # sleep(20)
    print(file_sizes_new)
    print(file_sizes_old)
    print(np.sum(file_sizes_new)/np.sum(file_sizes_old))
...@@ -551,7 +548,7 @@ if __name__ == "__main__":
    print(np.sum(file_sizes_new)/np.sum(file_sizes_old))
    # list_dic = np.load("first_dic.npy", allow_pickle="TRUE")
    bins = [21,32,48]
    # os.system("ls")
    # for i,item in enumerate(newnamesforlater):
    #     print(item)
    #     image, new_error, diff = huffman(images[i], 4, False)
......
\documentclass{article}
\usepackage{caption}
\usepackage{subcaption}
% Language setting
% Replace `english' with e.g. `spanish' to change the document language
\usepackage[english]{babel}
% Set page size and margins
% Replace `letterpaper' with `a4paper' for UK/EU standard size
\usepackage[letterpaper,top=2cm,bottom=2cm,left=3cm,right=3cm,marginparwidth=1.75cm]{geometry}
% Useful packages
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\title{Scanning Compression Algorithm for Thermal Images}
\author{Dr. Andrey Filippov, Bryce Hepner, Nathaniel Callens Jr., and Kelly Chang}
\begin{document}
\maketitle
\begin{abstract}
This algorithm operates by scanning through the image pixel by pixel, row by row, using already scanned pixels to predict the next pixel's value. By encoding the error between the predicted and actual pixel values, we were able to losslessly compress the images to about 40\% of their original size.
\end{abstract}
\section{Introduction}
The idea is based on how images are originally scanned in. Like a cathode-ray tube in a television, the algorithm goes line by line, reading and writing each pixel individually. Each pixel, as long as it is not on the top or side boundaries, has 4 neighbors that have already been read into the machine. Those points can be interpolated and used to predict the next pixel's value.
The goal is to encode the error between that prediction and the original value and use it to compress and decompress the image. Even though an occasional large error may need to be stored, the prediction is usually correct or off by only a small margin, making the distribution of stored values strongly peaked rather than uniform.
\begin{figure}[h]
\centering
\includegraphics[width=0.15\textwidth]{PixelArrangement.png}
\caption{\label{fig:pixels}The other 4 pixels are used to find the value of the 5th.}
\end{figure}
\section{Background}
The images used in this paper are all thermal images, taken during both day and night to give a broad range of conditions. The sensors that were used store the pixel values as integers on a range from
% TODO: Find out min and max values
\begin{figure}[h]
\centering
\begin{subfigure}{.4\textwidth}
\centering
\includegraphics[width=\linewidth]{Uniform_No_Title.png}
\caption{Encoding the Pixel Values}
\label{fig:sub1}
\end{subfigure}%
\begin{subfigure}{.4\textwidth}
\centering
\includegraphics[width=\linewidth]{Normal_No_Title.png}
\caption{Encoding the Error Values}
\label{fig:sub2}
\end{subfigure}
\caption{Histograms of the Information to Encode}
\label{fig:test}
\end{figure}
\pagebreak
For other uses of this algorithm, if it were to be used on floating point values with much higher precision, it would be advantageous to change this to a lossy format and round to whatever decimal place is best for the specific application. Our sensors do not need this because they round values to the optical equivalent of 3 decimal points on a 256 pixel value scale.
\section{The Approach}
To begin, the border is simply encoded into the system: there are not very many values there, the algorithm needs a place to start, and predicting these pixels would cost about the same as encoding them directly. Once the interior points are reached, the pixel to the left, top left, directly above, and top right have already been read in. Each of these values is given a point in the $x$-$y$ plane, with the top left at $(-1,1)$, the top pixel at $(0,1)$, the top right pixel at $(1,1)$, and the left pixel at $(-1,0)$, giving the target $(0,0)$. Using the formula for a plane in 3D ($ax + by + c = z$), we get the system of equations $$-a + b + c = z_0$$ $$b + c = z_1$$ $$a + b + c = z_2$$ $$-a + c = z_3.$$
These complete the form $Ax = b$ as
$$A =
\begin{bmatrix}
-1 & 1 & 1\\
0 & 1 & 1 \\
1 & 1 & 1 \\
-1 & 0 & 1
\end{bmatrix}
$$
$$b =
\begin{bmatrix}
z_0\\
z_1 \\
z_2 \\
z_3
\end{bmatrix}
$$
This system is overdetermined: there are 4 equations but only 3 unknowns ($a$, $b$, $c$), so in general no plane passes exactly through all 4 points.
This can be corrected by solving it in the least-squares sense, using the normal equations $A^{\top}A\,x = A^{\top}b$ (with the last row negated), which gives
$$A =
\begin{bmatrix}
3 & 0 & -1\\
0 & 3 & 3 \\
1 & -3 & -4
\end{bmatrix}
$$
and
$$b =
\begin{bmatrix}
-z_0 + z_2 - z_3\\
z_0 + z_1 + z_2 \\
-z_0 - z_1 - z_2 - z_3
\end{bmatrix}
$$
.
The resulting matrix is square and full rank, so the system can be solved with \textbf{numpy.linalg.solve}. The solution is then substituted back into $ax + by + c = z$ to find $c$, which is the predicted pixel value.
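As an illustrative check (the values here are chosen for demonstration, not taken from the data): with neighbors $z_0 = 100$, $z_1 = 102$, $z_2 = 104$, and $z_3 = 101$, solving the $3 \times 3$ system gives $a = 2$, $b = -1$, and $c = 103$, so the predicted value of the center pixel is $103$, and only the (typically small) difference between $103$ and the true value needs to be encoded.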
Huffman encoding performs well on data whose distribution is strongly peaked, which makes the error values a good candidate. Most predictions are off by only 0 or 1, since most objects have a nearly uniform surface temperature or a regular temperature gradient.
To handle objects whose temperature is not smoothly predictable (the cases where the assumptions above fail), a bin system is used. The residual returned by \textbf{np.linalg.lstsq} measures how far the 4 known neighbors are from their best-fitting plane, and this value is used to place the pixel in a category: if a single plane passes through all 4 points, the prediction error is expected to be much smaller than when the best-fitting plane is far from all of them. 5 bins were used, with splits chosen so that the difference values were distributed evenly across the bins. The resulting splits varied from image to image, with the first threshold ranging from about 11 to about 30, so an average threshold was used for all images; using the average rather than per-image splits changed the compression by less than half a percent and made the implementation easier.
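To make the routing concrete: with illustrative thresholds of $30$, $70$, and $141$ (values used in one version of our encoder), a pixel whose neighbor residual is $50$ falls into the second bin, so its error is encoded and later decoded with that bin's Huffman table.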
\section{Results}
We attained an average compression ratio of $.404$ on a set of 250 images, with values ranging from $.368$ to $.496$. When the size of the saved dictionary was included, the average ratio only changed from $.404$ to $.405$; a single image compressed on its own, however, only reached a ratio of $.705$. Although other images are not used directly when compressing a given image, sharing one dictionary across many images amortizes its cost and lowers the overall ratio. A universal dictionary cannot simply be built once and applied to new images because of how the border is stored: the border values themselves become symbols in the dictionary, a new image is unlikely to contain exactly those values, and covering every possible value would make the Huffman dictionary too large. There are ways around this, but they were not explored here.
\end{document}
\ No newline at end of file