Commit 310f8c6b authored by Bryce Hepner

added saving feature

parent 408baf5a
@@ -6,4 +6,5 @@ attic
 /compress_start.pyc
 /compress_experiment.ipynb
 *.txt
-!backup.txt
\ No newline at end of file
+!backup.txt
+*.tiff
\ No newline at end of file
from ipyparallel import Client
from WorkingPyDemo import *
from time import time
def initialize():
"""
Write a function that initializes a Client object, creates a Direct
View with all available engines, and imports scipy.sparse as spar on
all engines. Return the DirectView.
"""
client = Client() # Only works if a cluster is running.
dview = client[:]
dview.execute("from WorkingPyDemo import *") #neede d for others
dview.block = True
return dview
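# Note: Client() above only connects to a cluster that is already running;
# the engines must be started first, for example with "ipcluster start -n 4"
# (the engine count here is just an example).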
if __name__ == "__main__":
    scenes = file_extractor(folder_name)
    images = image_extractor(scenes)
    dview = initialize()
    list_dic = np.load("first_dic.npy", allow_pickle=True)
    bins = [21,32,48]
    starttime = time()
    def save_an_image(filename):
        """
        Compress a single TIFF image with the shared Huffman dictionary and
        write the encoded bytes to <image name>_Compressed.txt.
        """
        newnamesforlater = []
        list_dic = np.load("first_dic.npy", allow_pickle=True)
        bins = [21,32,48]
        image, new_error, diff = huffman(filename, 4, False)
        encoded_string = encoder(new_error, list_dic, diff, bins)
        inletters = bitstring_to_bytes(encoded_string)
        if filename[-5:] == ".tiff":
            newname = filename[:-5]
        else:
            newname = filename[:-4]
        newnamesforlater.append(newname + "_Compressed.txt")
        with open(newname + "_Compressed.txt", 'wb') as f:
            f.write(inletters)
    def decode_an_image(filename):
        """
        Decode an image back from its <image name>_Compressed.txt file.
        """
        list_dic = np.load("first_dic.npy", allow_pickle=True)
        bins = [21,32,48]
        image, new_error, diff = huffman(filename, 4, False)  # result not used below
        if filename[-5:] == ".tiff":
            newname = filename[:-5]
        else:
            newname = filename[:-4]
        encoded_string2 = bytes_to_bitstring(read_from_file(newname + "_Compressed.txt"))
        reconstruct_image = decoder(encoded_string2, list_dic, bins, False)

    dview.map_sync(save_an_image, images[200:])
    # dview.map_sync(decode_an_image, images[0:6])
    print(time() - starttime)
\ No newline at end of file
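For reference, a minimal serial sketch of checking one compressed file against its source image, assuming the WorkingPyDemo helpers used above and the saved first_dic.npy / bins values; the sample path is a hypothetical placeholder:

from WorkingPyDemo import *
import numpy as np

list_dic = np.load("first_dic.npy", allow_pickle=True)
bins = [21,32,48]
sample = "images/some_scene/some_image.tiff"  # hypothetical example path

# Recompute the prediction errors for the original image, then decode the
# saved bitstring and compare the two arrays, mirroring the commented checks
# in WorkingPyDemo.
image, new_error, diff = huffman(sample, 4, False)
encoded_string = bytes_to_bitstring(read_from_file(sample[:-5] + "_Compressed.txt"))
reconstructed = decoder(encoded_string, list_dic, bins, False)
print(np.allclose(image, reconstructed))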
@@ -9,7 +9,7 @@ from sklearn.neighbors import KernelDensity
 from collections import Counter
 import numpy.linalg as la
 from time import time
+import tifffile as tiff
 folder_name = "images"
 outputlocation = ""
@@ -494,34 +494,53 @@ def bytes_to_bitstring(input_bytearray):
     for i, item in enumerate(int_array):
         end_string += (bin(item)[2:].zfill(8))
     return end_string
 
+def text_to_tiff(filename, list_dic, bins):
+    encoded_string = bytes_to_bitstring(read_from_file(filename))
+    reconstruct_image = decoder(encoded_string, list_dic, bins, False)
+    reconstruct_image = reconstruct_image.astype(np.uint16)
+    reconstruct_image = Image.fromarray(reconstruct_image)
+    reconstruct_image.save(filename[:-16]+"_reconstructed.tiff", "TIFF")
+
 # starttime = time()
-scenes = file_extractor(folder_name)
-newnamesforlater = []
-images = image_extractor(scenes)
-list_dic, bins = make_dictionary(images, 4, False)
-file_sizes_new = []
-file_sizes_old = []
-# np.save("first_dic.npy", list_dic)
-for i in range(1):
-    # image, new_error, diff = huffman(images[i], 4, False)
-    # encoded_string = encoder(new_error, list_dic, diff, bins)
-    # inletters = bitstring_to_bytes(encoded_string)
-    if images[i][:-5] == ".tiff":
-        newname = images[i][:-5]
-    else:
-        newname = images[i][:-4]
-    newnamesforlater.append(newname + "_Compressed.txt")
-    # with open(newname + "_Compressed.txt", 'wb') as f:
-    #     f.write(inletters)
-    file_sizes_new.append((os.path.getsize(newname + "_Compressed.txt")))
-    file_sizes_old.append((os.path.getsize(images[i])))
-file_sizes_new.append(os.path.getsize("first_dic.npy"))
-# print(np.sum(file_sizes_new)/np.sum(file_sizes_old))
-# list_dic = np.load("first_dic.npy", allow_pickle="TRUE")
-# for i,item in enumerate(newnamesforlater[0:10]):
-#     image, new_error, diff = huffman(images[i], 4, False)
-#     encoded_string2 = bytes_to_bitstring(read_from_file(item))
-#     reconstruct_image = decoder(encoded_string2, list_dic, bins, False)
-#     print(np.allclose(image, reconstruct_image))
+if __name__ == "__main__":
+    scenes = file_extractor(folder_name)
+    images = image_extractor(scenes)
+    newnamesforlater = []
+    # print(len(images))
+    list_dic, bins = make_dictionary(images, 4, False)
+    # file_sizes_new = []
+    # file_sizes_old = []
+
+    np.save("first_dic.npy", list_dic)
+    # for i in range(6):
+    #     image, new_error, diff = huffman(images[i], 4, False)
+    #     encoded_string = encoder(new_error, list_dic, diff, bins)
+    #     inletters = bitstring_to_bytes(encoded_string)
+    #     if images[i][-5:] == ".tiff":
+    #         newname = images[i][:-5]
+    #     else:
+    #         newname = images[i][:-4]
+    #     print(newname)
+    #     newnamesforlater.append(newname + "_Compressed.txt")
+    #     with open(newname + "_Compressed.txt", 'wb') as f:
+    #         f.write(inletters)
+    #     file_sizes_new.append((os.path.getsize(newname + "_Compressed.txt")))
+    #     file_sizes_old.append((os.path.getsize(images[i])))
+    # file_sizes_new.append(os.path.getsize("first_dic.npy"))
+    # # print(np.sum(file_sizes_new)/np.sum(file_sizes_old))
+    # list_dic = np.load("first_dic.npy", allow_pickle="TRUE")
+    # bins = [21,32,48]
+    # starttime = time()
+    # for i,item in enumerate(newnamesforlater[0:6]):
+    #     image, new_error, diff = huffman(images[i], 4, False)
+    #     encoded_string2 = bytes_to_bitstring(read_from_file(item))
+    #     reconstruct_image = decoder(encoded_string2, list_dic, bins, False)
+    #     print(np.allclose(image, reconstruct_image))
+    # print(time()-starttime)
+    # text_to_tiff("images/1626033496_437803/1626033496_437803_3._Compressed.txt", list_dic, bins)
+    # original_image = Image.open("images/1626033496_437803/1626033496_437803_3.tiff")
+    # original_image = np.array(original_image)[1:]
+    # secondimage = Image.open("images/1626033496_437803/1626033496_437803_3_reconstructed.tiff")
+    # secondimage = np.array(secondimage)
+    # print(np.allclose(original_image, secondimage))
\ No newline at end of file
No preview for this file type