Elphel / image-compression
Commit 2659c0f6 authored Apr 14, 2022 by Kelly Chang
kelly changes
Parent: 88251d36
Showing 3 changed files with 185 additions and 98 deletions
.DS_Store (+0 −0)
.ipynb_checkpoints/Encoding_decoding-checkpoint.ipynb (+112 −61)
Encoding_decoding.ipynb (+73 −37)
.DS_Store
No preview for this file type
.ipynb_checkpoints/Encoding_decoding-checkpoint.ipynb
...
...
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count":
12
,
"execution_count":
59
,
"id": "14f74f21",
"metadata": {},
"outputs": [],
...
...
@@ -19,12 +19,13 @@
"from sklearn.neighbors import KernelDensity\n",
"import pandas as pd\n",
"from collections import Counter\n",
"import time"
"import time\n",
"import numpy.linalg as la"
]
},
{
"cell_type": "code",
"execution_count":
13
,
"execution_count":
49
,
"id": "c16af61f",
"metadata": {},
"outputs": [],
...
...
@@ -78,12 +79,12 @@
},
{
"cell_type": "code",
"execution_count":
14
,
"id": "
aceba613
",
"execution_count":
61
,
"id": "
8172fa41
",
"metadata": {},
"outputs": [],
"source": [
"def predict_pix(tiff_image):\n",
"def predict_pix(tiff_image
, difference = True
):\n",
" \"\"\"\n",
" This function predict the pixel values excluding the boundary.\n",
" Using the 4 neighbor pixel values and MSE to predict the next pixel value\n",
...
...
@@ -99,16 +100,15 @@
" \n",
" Return:\n",
" image (512 X 640): original image \n",
" predict (325380,): predicted image exclude the boundary\n",
" diff. (325380,): difference between the min and max of four neighbors exclude the boundary\n",
" predict (325380,): predicted image excluding the boundary\n",
" diff. (325380,): IF difference = TRUE, difference between the min and max of four neighbors exclude the boundary\n",
" ELSE: the residuals of the four nearest pixels to a fitted hyperplane\n",
" error (325380,): difference between the original image and predicted image\n",
" A (3 X 3): system of equation\n",
" \"\"\"\n",
" image = Image.open(tiff_image) #Open the image and read it as an Image object\n",
" image = np.array(image)[1:,:] #Convert to an array, leaving out the first row because the first row is just housekeeping data\n",
" image = image.astype(int)\n",
" print(image.shape)\n",
" # use \n",
" image = image.astype(int) \n",
" A = np.array([[3,0,-1],[0,3,3],[1,-3,-4]]) # the matrix for system of equation\n",
" # where z0 = (-1,1), z1 = (0,1), z2 = (1,1), z3 = (-1,0)\n",
" z0 = image[0:-2,0:-2] # get all the first pixel for the entire image\n",
...
...
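For context on the neighbor extraction above: z0 = image[0:-2,0:-2] grabs the upper-left neighbor of every interior pixel in one vectorized slice. The z1–z3 slices are elided from this hunk, so the sketch below infers them from the decoder's indexing (z1 above, z2 upper-right, z3 left); the toy image values are hypothetical.

import numpy as np

# Toy 4x5 "image"; each slice has one entry per interior pixel.
image = np.arange(20).reshape(4, 5)

z0 = image[0:-2, 0:-2]   # upper-left  neighbor, offset (-1, -1) -- shown in the diff
z1 = image[0:-2, 1:-1]   # upper       neighbor, offset (-1,  0) -- assumed from the decoder
z2 = image[0:-2, 2:]     # upper-right neighbor, offset (-1, +1) -- assumed from the decoder
z3 = image[1:-1, 0:-2]   # left        neighbor, offset ( 0, -1) -- assumed from the decoder

print(z0.shape, z1.shape, z2.shape, z3.shape)   # all (rows-2, cols-2)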
@@ -123,23 +123,37 @@
" # use numpy solver to solve the system of equations all at once\n",
" #predict = np.floor(np.linalg.solve(A,y)[-1])\n",
" predict = np.round(np.round((np.linalg.solve(A,y)[-1]),1))\n",
" \n",
" #Matrix system of points that will be used to solve the least squares fitting hyperplane\n",
" points = np.array([[-1,-1,1], [-1,0,1], [-1,1,1], [0,-1,1]])\n",
" \n",
" # flatten the neighbor pixlels and stack them together\n",
" z0 = np.ravel(z0)\n",
" z1 = np.ravel(z1)\n",
" z2 = np.ravel(z2)\n",
" z3 = np.ravel(z3)\n",
" neighbor = np.vstack((z0,z1,z2,z3)).T\n",
" \n",
" if difference:\n",
" # calculate the difference\n",
" diff = np.max(neighbor,axis = 1) - np.min(neighbor, axis=1)\n",
" \n",
" else:\n",
" #Compute the best fitting hyperplane using least squares\n",
" #The res is the residuals of the four points used to fit the hyperplane (summed distance of each of the \n",
" #points to the hyperplane), it is a measure of gradient\n",
" f, diff, rank, s = la.lstsq(points, neighbor.T, rcond=None) \n",
" diff = diff.astype(int)\n",
" \n",
" # calculate the error\n",
" error = np.ravel(image[1:-1,1:-1])-predict\n",
" \n",
" return image, predict, diff, error, A
\n
"
" return image, predict, diff, error, A"
]
},
{
"cell_type": "code",
"execution_count":
15
,
"execution_count":
62
,
"id": "6b965751",
"metadata": {},
"outputs": [],
...
...
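The new difference=False branch above swaps the max-min spread for the residual of a least-squares hyperplane fitted through the four neighbors (la.lstsq(points, neighbor.T, rcond=None)). Below is a minimal sketch of what that residual looks like for a single pixel; the points matrix is taken from the diff, the neighbor values are hypothetical.

import numpy as np

# [row, col, 1] coordinates of the four causal neighbors (same as in the diff).
points = np.array([[-1, -1, 1],
                   [-1,  0, 1],
                   [-1,  1, 1],
                   [ 0, -1, 1]])
neighbors = np.array([12, 15, 14, 11])   # hypothetical pixel values

# lstsq fits the plane a*row + b*col + c and returns the summed squared
# residual of the four points; a flat neighborhood gives ~0, an edge a
# large value, which is why it can stand in for the max-min "difference".
coeffs, residual, rank, sv = np.linalg.lstsq(points, neighbors, rcond=None)
print(coeffs, residual)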
@@ -192,12 +206,12 @@
},
{
"cell_type": "code",
"execution_count":
24
,
"execution_count":
63
,
"id": "b7561883",
"metadata": {},
"outputs": [],
"source": [
"def huffman(image, num_bins=4):\n",
"def huffman(image, num_bins=4
, difference = True
):\n",
" \"\"\"\n",
" This function is used to encode the error based on the difference\n",
" and split the difference into different bins\n",
...
...
@@ -218,7 +232,7 @@
" \n",
" \"\"\"\n",
" # get the prediction error and difference\n",
" image, predict, diff, error, A = predict_pix(image)\n",
" image, predict, diff, error, A = predict_pix(image
, difference
)\n",
" \n",
" # get the number of points in each bins\n",
" data_points_per_bin = len(diff) // num_bins\n",
...
...
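huffman() now forwards the difference flag to predict_pix and still builds one Huffman table per bin of the difference statistic, with data_points_per_bin = len(diff) // num_bins pixels per bin. The exact cut-point logic is not shown in this hunk; the sketch below assumes equally populated bins read off the sorted differences, with made-up data.

import numpy as np

diff = np.random.randint(0, 60, size=325380)      # stand-in for the per-pixel difference
num_bins = 4
data_points_per_bin = len(diff) // num_bins

# Assumed binning: sort the differences and take cut points so each bin
# holds roughly the same number of pixels.
sorted_diff = np.sort(diff)
bins = [int(sorted_diff[(i + 1) * data_points_per_bin]) for i in range(num_bins - 1)]
print(bins)   # three thresholds, e.g. the notebook previously printed [26, 40, 62]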
@@ -300,18 +314,23 @@
},
{
"cell_type": "code",
"execution_count":
17
,
"execution_count":
64
,
"id": "2eb774d2",
"metadata": {},
"outputs": [],
"source": [
"def encoder(error, list_dic, diff, bound, bins):\n",
" \"\"\"\n",
<<<<<<< HEAD
" This function en\n",
=======
" This function \n",
>>>>>>> a352868acf14cebeaa7ea2a58e879b1d36b066ad
" This function encode the matrix with huffman coding tables\n",
" \n",
" Input:\n",
" error (512, 640): a matrix with all the errors\n",
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" bound (2300,): the boundary values after subtracting the very first pixel value\n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" encoded (512, 640): encoded matrix\n",
" \"\"\"\n",
" # copy the error matrix (including the boundary)\n",
" encoded = np.copy(error).astype(int).astype(str).astype(object)\n",
...
...
@@ -335,15 +354,22 @@
},
{
"cell_type": "code",
"execution_count":
23
,
"execution_count":
79
,
"id": "8eeb40d0",
"metadata": {},
"outputs": [],
"source": [
"def decoder(A, encoded_matrix, list_dic, bins):\n",
"def decoder(A, encoded_matrix, list_dic, bins
, use_diff
):\n",
" \"\"\"\n",
" Function that accecpts the prediction matrix A for the linear system,\n",
" the encoded matrix of error values, and the encoding dicitonary.\n",
" This function decodes the encoded_matrix.\n",
" Input:\n",
" A (3 X 3): system of equation\n",
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" encoded_matrix (512, 640): encoded matrix\n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" decode_matrix (512, 640): decoded matrix\n",
" \"\"\"\n",
" # change the dictionary back to list\n",
" # !!!!!WARNING!!!! has to change this part, eveytime you change the number of bins\n",
...
...
@@ -362,62 +388,85 @@
" the_keys4 = list(list_dic[4].keys())\n",
" the_values4 = list(list_dic[4].values())\n",
" \n",
" error_matrix = np.zeros((512,640))\n",
" #Matrix system of points that will be used to solve the least squares fitting hyperplane\n",
" points = np.array([[-1,-1,1], [-1,0,1], [-1,1,1], [0,-1,1]])\n",
" \n",
" decode_matrix = np.zeros((512,640))\n",
" # loop through all the element in the matrix\n",
" for i in range(
error
_matrix.shape[0]):\n",
" for j in range(
error
_matrix.shape[1]):\n",
" for i in range(
decode
_matrix.shape[0]):\n",
" for j in range(
decode
_matrix.shape[1]):\n",
" # if it's the very first pixel on the image\n",
" if i == 0 and j == 0:\n",
"
error
_matrix[i][j] = int(the_keys0[the_values0.index(encoded_matrix[i,j])])\n",
"
decode
_matrix[i][j] = int(the_keys0[the_values0.index(encoded_matrix[i,j])])\n",
" # if it's on the boundary\n",
" elif i == 0 or i ==
error_matrix.shape[0]-1 or j == 0 or j == error
_matrix.shape[1]-1:\n",
"
error_matrix[i][j] = int(the_keys0[the_values0.index(encoded_matrix[i,j])]) + error
_matrix[0][0]\n",
" elif i == 0 or i ==
decode_matrix.shape[0]-1 or j == 0 or j == decode
_matrix.shape[1]-1:\n",
"
decode_matrix[i][j] = int(the_keys0[the_values0.index(encoded_matrix[i,j])]) + decode
_matrix[0][0]\n",
" # if not the boundary\n",
" else:\n",
" # predict the image with the known pixel value\n",
" z0 =
error
_matrix[i-1][j-1]\n",
" z1 =
error
_matrix[i-1][j]\n",
" z2 =
error
_matrix[i-1][j+1]\n",
" z3 =
error
_matrix[i][j-1]\n",
" z0 =
decode
_matrix[i-1][j-1]\n",
" z1 =
decode
_matrix[i-1][j]\n",
" z2 =
decode
_matrix[i-1][j+1]\n",
" z3 =
decode
_matrix[i][j-1]\n",
" y0 = int(-z0+z2-z3)\n",
" y1 = int(z0+z1+z2)\n",
" y2 = int(-z0-z1-z2-z3)\n",
" y = np.vstack((y0,y1,y2))\n",
" if use_diff:\n",
" difference = max(z0,z1,z2,z3) - min(z0,z1,z2,z3)\n",
" else:\n",
" \n",
" f, difference, rank, s = la.lstsq(points, [z0,z1,z2,z3], rcond=None) \n",
" difference = difference.astype(int)\n",
" \n",
" predict = np.round(np.round(np.linalg.solve(A,y)[-1][0],1))\n",
" \n",
" # add on the difference by searching the dictionary\n",
" # !!!!!WARNING!!!! has to change this part, eveytime you change the number of bins\n",
" if difference <= bins[0]:\n",
"
error
_matrix[i][j] = int(the_keys1[the_values1.index(encoded_matrix[i,j])]) + int(predict)\n",
"
decode
_matrix[i][j] = int(the_keys1[the_values1.index(encoded_matrix[i,j])]) + int(predict)\n",
" elif difference <= bins[1] and difference > bins[0]:\n",
"
error
_matrix[i][j] = int(the_keys2[the_values2.index(encoded_matrix[i,j])]) + int(predict)\n",
"
decode
_matrix[i][j] = int(the_keys2[the_values2.index(encoded_matrix[i,j])]) + int(predict)\n",
" elif difference <= bins[2] and difference > bins[1]:\n",
"
error
_matrix[i][j] = int(the_keys3[the_values3.index(encoded_matrix[i,j])]) + int(predict)\n",
"
decode
_matrix[i][j] = int(the_keys3[the_values3.index(encoded_matrix[i,j])]) + int(predict)\n",
" else:\n",
"
error
_matrix[i][j] = int(the_keys4[the_values4.index(encoded_matrix[i,j])]) + int(predict)\n",
"
decode
_matrix[i][j] = int(the_keys4[the_values4.index(encoded_matrix[i,j])]) + int(predict)\n",
" \n",
" \n",
" return
error
_matrix.astype(int)"
" return
decode
_matrix.astype(int)"
]
},
{
"cell_type": "code",
"execution_count":
19
,
"execution_count":
74
,
"id": "f959fe93",
"metadata": {},
"outputs": [],
"source": [
"def compress_rate(image, error, diff, bound, list_dic, bins):\n",
"def compress_rate(image, new_error, diff, bound, list_dic, bins):\n",
" '''\n",
" This function is used to calculate the compression rate.\n",
" Input:\n",
" image (512, 640): original image\n",
" new_error (512, 640): error that includes the boundary\n",
" diff (510, 638): difference of min and max of the 4 neighbors\n",
" bound (2300,): the boundary values after subtracting the very first pixel value\n",
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" compression rate\n",
" '''\n",
" # the bits for the original image\n",
" o_len = 0\n",
" # the bits for the compressed image\n",
" c_len = 0\n",
" # initializing the varible \n",
" im = np.reshape(image,(512, 640))\n",
" real_b = np.hstack((im
[0,:],im[-1,:],im[1:-1,0],im
[1:-1,-1]))\n",
" original = im[1:-1,1:-1].reshape(-1)\n",
" real_b = np.hstack((im
age[0,:],image[-1,:],image[1:-1,0],image
[1:-1,-1]))\n",
" original = im
age
[1:-1,1:-1].reshape(-1)\n",
" diff = diff.reshape(-1)\n",
" error = new_error[1:-1,1:-1].reshape(-1)\n",
" \n",
" # calculate the bit for boundary\n",
" for i in range(0,len(bound)):\n",
...
...
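In the decoder loop above, every interior pixel is reconstructed by rebuilding the same 3x3 system from already-decoded neighbors and taking the last component of the solution as the prediction, mirroring predict_pix. A single-pixel sketch with hypothetical neighbor values; A and the rounding come from the diff.

import numpy as np

A = np.array([[3, 0, -1],
              [0, 3,  3],
              [1, -3, -4]])          # system matrix, same as in predict_pix

z0, z1, z2, z3 = 12, 15, 14, 11      # hypothetical already-decoded neighbors
y = np.array([-z0 + z2 - z3,
               z0 + z1 + z2,
              -z0 - z1 - z2 - z3], dtype=float)

# The last component of the solution is the predicted pixel value; the double
# rounding matches the encoder so both sides compute identical predictions.
predict = np.round(np.round(np.linalg.solve(A, y)[-1], 1))
print(predict)   # the decoded pixel is then predict + the Huffman-decoded error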
@@ -426,6 +475,7 @@
" \n",
" # calculate the bit for the pixels inside the boundary\n",
" for i in range(0,len(original)):\n",
"\n",
" # for the original image\n",
" o_len += len(bin(original[i])[2:])\n",
" \n",
...
...
@@ -441,14 +491,14 @@
" c_len += len(list_dic[3][str(int(error[i]))])\n",
"\n",
" else: \n",
" c_len += len(list_dic[
5
][str(int(error[i]))])\n",
" c_len += len(list_dic[
4
][str(int(error[i]))])\n",
"\n",
" return c_len/o_len"
]
},
{
"cell_type": "code",
"execution_count":
25
,
"execution_count":
80
,
"id": "3e0e9742",
"metadata": {},
"outputs": [
...
...
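compress_rate() compares the raw bit length of each interior pixel (via bin()) with the length of its Huffman code, looked up in the table for that pixel's difference bin. A one-pixel illustration of the bookkeeping; the pixel value, error, and code below are hypothetical.

# Hypothetical single-pixel bit accounting, mirroring the loops in compress_rate.
original_pixel = 142                  # raw pixel value
error_value = -3                      # prediction error at that pixel
huffman_table = {"-3": "1101"}        # hypothetical Huffman table entry

o_bits = len(bin(original_pixel)[2:])                 # raw bits (here 8)
c_bits = len(huffman_table[str(int(error_value))])    # Huffman code bits (here 4)
print(o_bits, c_bits)   # the compression rate is the ratio of the two sums over all pixels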
@@ -456,7 +506,6 @@
"name": "stdout",
"output_type": "stream",
"text": [
"(512, 640)\n",
"True\n",
"5\n"
]
...
...
@@ -465,30 +514,32 @@
"source": [
"scenes = file_extractor()\n",
"images = image_extractor(scenes)\n",
"list_dic, image, new_error, diff, bound, predict, bins, A = huffman(images[0], 4)\n",
"list_dic, image, new_error, diff, bound, predict, bins, A = huffman(images[0], 4
, False
)\n",
"encoded_matrix = encoder(new_error, list_dic, diff, bound, bins)\n",
"reconstruct_image = decoder(A, encoded_matrix, list_dic, bins)\n",
"reconstruct_image = decoder(A, encoded_matrix, list_dic, bins
, False
)\n",
"print(np.allclose(image, reconstruct_image))\n",
"print(len(list_dic))"
]
},
{
"cell_type": "code",
"execution_count":
32
,
"execution_count":
81
,
"id": "004e8ba8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[26, 40, 62]\n"
"data": {
"text/plain": [
"0.4437662760416667"
]
},
"execution_count": 81,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\n",
"print(bins)"
"compress_rate(image, new_error, diff, bound, list_dic, bins)"
]
},
{
...
...
Encoding_decoding.ipynb
...
...
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count":
34
,
"execution_count":
59
,
"id": "14f74f21",
"metadata": {},
"outputs": [],
...
...
@@ -19,12 +19,13 @@
"from sklearn.neighbors import KernelDensity\n",
"import pandas as pd\n",
"from collections import Counter\n",
"import time"
"import time\n",
"import numpy.linalg as la"
]
},
{
"cell_type": "code",
"execution_count":
35
,
"execution_count":
49
,
"id": "c16af61f",
"metadata": {},
"outputs": [],
...
...
@@ -78,12 +79,12 @@
},
{
"cell_type": "code",
"execution_count":
36
,
"id": "
aceba613
",
"execution_count":
61
,
"id": "
8172fa41
",
"metadata": {},
"outputs": [],
"source": [
"def predict_pix(tiff_image):\n",
"def predict_pix(tiff_image
, difference = True
):\n",
" \"\"\"\n",
" This function predict the pixel values excluding the boundary.\n",
" Using the 4 neighbor pixel values and MSE to predict the next pixel value\n",
...
...
@@ -99,16 +100,15 @@
" \n",
" Return:\n",
" image (512 X 640): original image \n",
" predict (325380,): predicted image exclude the boundary\n",
" diff. (325380,): difference between the min and max of four neighbors exclude the boundary\n",
" predict (325380,): predicted image excluding the boundary\n",
" diff. (325380,): IF difference = TRUE, difference between the min and max of four neighbors exclude the boundary\n",
" ELSE: the residuals of the four nearest pixels to a fitted hyperplane\n",
" error (325380,): difference between the original image and predicted image\n",
" A (3 X 3): system of equation\n",
" \"\"\"\n",
" image = Image.open(tiff_image) #Open the image and read it as an Image object\n",
" image = np.array(image)[1:,:] #Convert to an array, leaving out the first row because the first row is just housekeeping data\n",
" image = image.astype(int)\n",
" print(image.shape)\n",
" # use \n",
" image = image.astype(int) \n",
" A = np.array([[3,0,-1],[0,3,3],[1,-3,-4]]) # the matrix for system of equation\n",
" # where z0 = (-1,1), z1 = (0,1), z2 = (1,1), z3 = (-1,0)\n",
" z0 = image[0:-2,0:-2] # get all the first pixel for the entire image\n",
...
...
@@ -123,23 +123,37 @@
" # use numpy solver to solve the system of equations all at once\n",
" #predict = np.floor(np.linalg.solve(A,y)[-1])\n",
" predict = np.round(np.round((np.linalg.solve(A,y)[-1]),1))\n",
" \n",
" #Matrix system of points that will be used to solve the least squares fitting hyperplane\n",
" points = np.array([[-1,-1,1], [-1,0,1], [-1,1,1], [0,-1,1]])\n",
" \n",
" # flatten the neighbor pixlels and stack them together\n",
" z0 = np.ravel(z0)\n",
" z1 = np.ravel(z1)\n",
" z2 = np.ravel(z2)\n",
" z3 = np.ravel(z3)\n",
" neighbor = np.vstack((z0,z1,z2,z3)).T\n",
" \n",
" if difference:\n",
" # calculate the difference\n",
" diff = np.max(neighbor,axis = 1) - np.min(neighbor, axis=1)\n",
" \n",
" else:\n",
" #Compute the best fitting hyperplane using least squares\n",
" #The res is the residuals of the four points used to fit the hyperplane (summed distance of each of the \n",
" #points to the hyperplane), it is a measure of gradient\n",
" f, diff, rank, s = la.lstsq(points, neighbor.T, rcond=None) \n",
" diff = diff.astype(int)\n",
" \n",
" # calculate the error\n",
" error = np.ravel(image[1:-1,1:-1])-predict\n",
" \n",
" return image, predict, diff, error, A
\n
"
" return image, predict, diff, error, A"
]
},
{
"cell_type": "code",
"execution_count":
37
,
"execution_count":
62
,
"id": "6b965751",
"metadata": {},
"outputs": [],
...
...
@@ -192,12 +206,12 @@
},
{
"cell_type": "code",
"execution_count":
38
,
"execution_count":
63
,
"id": "b7561883",
"metadata": {},
"outputs": [],
"source": [
"def huffman(image, num_bins=4):\n",
"def huffman(image, num_bins=4
, difference = True
):\n",
" \"\"\"\n",
" This function is used to encode the error based on the difference\n",
" and split the difference into different bins\n",
...
...
@@ -218,7 +232,7 @@
" \n",
" \"\"\"\n",
" # get the prediction error and difference\n",
" image, predict, diff, error, A = predict_pix(image)\n",
" image, predict, diff, error, A = predict_pix(image
, difference
)\n",
" \n",
" # get the number of points in each bins\n",
" data_points_per_bin = len(diff) // num_bins\n",
...
...
@@ -300,7 +314,7 @@
},
{
"cell_type": "code",
"execution_count":
39
,
"execution_count":
64
,
"id": "2eb774d2",
"metadata": {},
"outputs": [],
...
...
@@ -340,12 +354,12 @@
},
{
"cell_type": "code",
"execution_count":
40
,
"execution_count":
79
,
"id": "8eeb40d0",
"metadata": {},
"outputs": [],
"source": [
"def decoder(A, encoded_matrix, list_dic, bins):\n",
"def decoder(A, encoded_matrix, list_dic, bins
, use_diff
):\n",
" \"\"\"\n",
" This function decodes the encoded_matrix.\n",
" Input:\n",
...
...
@@ -353,6 +367,7 @@
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" encoded_matrix (512, 640): encoded matrix\n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" decode_matrix (512, 640): decoded matrix\n",
" \"\"\"\n",
...
...
@@ -373,6 +388,9 @@
" the_keys4 = list(list_dic[4].keys())\n",
" the_values4 = list(list_dic[4].values())\n",
" \n",
" #Matrix system of points that will be used to solve the least squares fitting hyperplane\n",
" points = np.array([[-1,-1,1], [-1,0,1], [-1,1,1], [0,-1,1]])\n",
" \n",
" decode_matrix = np.zeros((512,640))\n",
" # loop through all the element in the matrix\n",
" for i in range(decode_matrix.shape[0]):\n",
...
...
@@ -394,7 +412,13 @@
" y1 = int(z0+z1+z2)\n",
" y2 = int(-z0-z1-z2-z3)\n",
" y = np.vstack((y0,y1,y2))\n",
" if use_diff:\n",
" difference = max(z0,z1,z2,z3) - min(z0,z1,z2,z3)\n",
" else:\n",
" \n",
" f, difference, rank, s = la.lstsq(points, [z0,z1,z2,z3], rcond=None) \n",
" difference = difference.astype(int)\n",
" \n",
" predict = np.round(np.round(np.linalg.solve(A,y)[-1][0],1))\n",
" \n",
" # add on the difference by searching the dictionary\n",
...
...
@@ -414,15 +438,24 @@
},
{
"cell_type": "code",
"execution_count":
41
,
"execution_count":
74
,
"id": "f959fe93",
"metadata": {},
"outputs": [],
"source": [
"def compress_rate(image, error, diff, bound, list_dic, bins):\n",
"def compress_rate(image,
new_
error, diff, bound, list_dic, bins):\n",
" '''\n",
" This function is used to calculate the compression rate.\n",
" Input:\n",
" image (512, 640): original image\n",
" new_error (512, 640): error that includes the boundary\n",
" diff (510, 638): difference of min and max of the 4 neighbors\n",
" bound (2300,): the boundary values after subtracting the very first pixel value\n",
" list_dic (num_dic + 1,): a list of huffman coding table \n",
" bins (num_bins - 1,): a list of threshold to cut the bins\n",
" \n",
" Return:\n",
" compression rate\n",
" '''\n",
" # the bits for the original image\n",
" o_len = 0\n",
...
...
@@ -433,6 +466,7 @@
" real_b = np.hstack((image[0,:],image[-1,:],image[1:-1,0],image[1:-1,-1]))\n",
" original = image[1:-1,1:-1].reshape(-1)\n",
" diff = diff.reshape(-1)\n",
" error = new_error[1:-1,1:-1].reshape(-1)\n",
" \n",
" # calculate the bit for boundary\n",
" for i in range(0,len(bound)):\n",
...
...
@@ -441,6 +475,7 @@
" \n",
" # calculate the bit for the pixels inside the boundary\n",
" for i in range(0,len(original)):\n",
"\n",
" # for the original image\n",
" o_len += len(bin(original[i])[2:])\n",
" \n",
...
...
@@ -456,14 +491,14 @@
" c_len += len(list_dic[3][str(int(error[i]))])\n",
"\n",
" else: \n",
" c_len += len(list_dic[
5
][str(int(error[i]))])\n",
" c_len += len(list_dic[
4
][str(int(error[i]))])\n",
"\n",
" return c_len/o_len"
]
},
{
"cell_type": "code",
"execution_count":
42
,
"execution_count":
80
,
"id": "3e0e9742",
"metadata": {},
"outputs": [
...
...
@@ -471,7 +506,6 @@
"name": "stdout",
"output_type": "stream",
"text": [
"(512, 640)\n",
"True\n",
"5\n"
]
...
...
@@ -480,30 +514,32 @@
"source": [
"scenes = file_extractor()\n",
"images = image_extractor(scenes)\n",
"list_dic, image, new_error, diff, bound, predict, bins, A = huffman(images[0], 4)\n",
"list_dic, image, new_error, diff, bound, predict, bins, A = huffman(images[0], 4
, False
)\n",
"encoded_matrix = encoder(new_error, list_dic, diff, bound, bins)\n",
"reconstruct_image = decoder(A, encoded_matrix, list_dic, bins)\n",
"reconstruct_image = decoder(A, encoded_matrix, list_dic, bins
, False
)\n",
"print(np.allclose(image, reconstruct_image))\n",
"print(len(list_dic))"
]
},
{
"cell_type": "code",
"execution_count":
33
,
"execution_count":
81
,
"id": "004e8ba8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(512, 640)\n"
"data": {
"text/plain": [
"0.4437662760416667"
]
},
"execution_count": 81,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\n",
"print(encoded_matrix.shape)"
"compress_rate(image, new_error, diff, bound, list_dic, bins)"
]
},
{
...
...