Commit 96e5883b authored by Bryce Hepner

trying to convolve, don't know how to deconvolve

parent 5476cdb4
......@@ -17,4 +17,5 @@ attic
*.corr-xml
*.DS_Store
lwir16.tar.gz
*.mp4
\ No newline at end of file
*.mp4
*.jpg
\ No newline at end of file
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
......@@ -15,21 +15,21 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3\n",
"3\n",
"0.5416965429002373\n"
"5\n",
"5\n",
"0.30596738362283216\n"
]
}
],
"source": [
"scenes = file_extractor(\"averaged_images(11)\")\n",
"scenes = file_extractor(\"betterimages\")\n",
"images = image_extractor(scenes)\n",
"print(len(images))\n",
"newnamesforlater = []\n",
......@@ -102,14 +102,15 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.6922330946285837\n"
"0.5066909897511674\n",
"0.6485158723863795\n"
]
}
],
......@@ -123,26 +124,20 @@
" writer = png.Writer(newimage.shape[1], newimage.shape[0], greyscale=True, bitdepth=16)\n",
" writer.write(f, newimage)\n",
" pngsizes.append(os.path.getsize(newnamesforlater[i][:-4] + \".png\"))\n",
"print(np.sum(pngsizes)/np.sum(file_sizes_old))\n"
"print(np.sum(pngsizes)/np.sum(file_sizes_old))\n",
"print(0.6485158723863795)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[-190.26562 -177.99805 -177.38672 ... -166.95898 -156.64062 -145.72461]\n",
" [-175.46094 -163.56055 -183.07617 ... -169.2207 -167.58789 -154.14062]\n",
" [-171.45898 -179.1582 -174.68164 ... -173.10547 -157.51758 -156.97461]\n",
" ...\n",
" [-143.66992 -127.69531 -143.9043 ... 156.90625 170.79102 165.55469]\n",
" [-139.97656 -134.71875 -152.26953 ... 169.49805 158.11914 172.49023]\n",
" [-145.33984 -127.59375 -134.07227 ... 182.88672 169.80273 183.32812]]\n",
"0.3016682254469224\n"
"0.6493572339400003\n"
]
}
],
......
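A note on the notebook above: the ratio it prints is total PNG bytes over total original bytes, obtained by re-saving each 16-bit greyscale frame with pypng. A minimal, self-contained sketch of that measurement under the same approach; the helper name and the .tiff input paths are illustrative, not taken from the repository:

import os
import numpy as np
import png  # pypng
from PIL import Image

def png_compression_ratio(source_paths):
    # Re-save each 16-bit greyscale frame as PNG and compare total file sizes.
    png_sizes, source_sizes = [], []
    for path in source_paths:
        frame = np.array(Image.open(path))  # 2-D uint16 array
        out_path = os.path.splitext(path)[0] + ".png"
        with open(out_path, "wb") as f:
            writer = png.Writer(frame.shape[1], frame.shape[0], greyscale=True, bitdepth=16)
            writer.write(f, frame)  # pypng accepts an iterable of rows
        png_sizes.append(os.path.getsize(out_path))
        source_sizes.append(os.path.getsize(path))
    return np.sum(png_sizes) / np.sum(source_sizes)

# e.g. png_compression_ratio(["frame0.tiff", "frame1.tiff"])  # hypothetical inputs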
from audioop import mul
from matplotlib.image import composite_images
from WorkingPyDemo import *
from scipy.ndimage.filters import gaussian_filter
......@@ -102,24 +103,24 @@ def find_only_in_channel(images, channel_name = "10"):
def adjust_to_original(new_image, average_image):
    original_image_min = np.min(new_image)
    original_image_max = np.max(new_image)
    # average_image = average_image - np.mean(average_image)
    average_image = average_image - np.mean(average_image)
    # adjusted_image = average_image - new_image
    # adjusted_image = gaussian_filter(new_image,sigma=1)
    adjusted_image = new_image - (average_image - gaussian_filter(average_image,sigma=1))
    # adjusted_image = gaussian_filter(adjusted_image,sigma=20)
    # adjusted_image = new_image + (average_image - np.array(Image.fromarray(average_image).convert("L").filter(ImageFilter.GaussianBlur(radius=4))))
    plt.subplot(121)
    plt.imshow(color_adjust((adjusted_image)),cmap='gray',vmin = 0, vmax=1)
    plt.subplot(122)
    plt.imshow(color_adjust(new_image),cmap='gray',vmin = 0, vmax=1)
    plt.show()
    # plt.subplot(121)
    # plt.imshow(color_adjust((adjusted_image)),cmap='gray',vmin = 0, vmax=1)
    # plt.subplot(122)
    # plt.imshow(color_adjust(new_image),cmap='gray',vmin = 0, vmax=1)
    # plt.show()
    adjusted_image = adjusted_image - np.min(adjusted_image)
    adjusted_image = adjusted_image*(original_image_max-original_image_min)/np.max(adjusted_image)
    adjusted_image = adjusted_image + original_image_min
    print(adjusted_image.dtype)
    # print(adjusted_image.dtype)
    return adjusted_image.astype(np.uint16)
def color_adjust(visual_array):
......@@ -144,7 +145,7 @@ def create_testable_images(images, selected_channel, quantity_of_images):
    images = find_only_in_channel(images, selected_channel)
    # image_locations = np.random.choice(len(images), quantity_of_images, replace=False)
    image_locations = np.arange(quantity_of_images) + 10000
    image_locations = np.arange(quantity_of_images) + 5000
    selected_images = np.array(images)[image_locations]
......@@ -161,7 +162,7 @@ def create_testable_images(images, selected_channel, quantity_of_images):
        wherelastslash = item.rfind("/")
        image = np.array(image)[1:]
        savable_original = Image.fromarray(image)
        # savable_original.save("averaged_images(" + selected_channel + ")/innerfolder/original" + item[wherelastslash + 1:])
        savable_original.save("original_images(" + selected_channel + ")/innerfolder/original" + item[wherelastslash + 1:])
        altered_image = adjust_to_original(image, average_image)
        altered_image = Image.fromarray(altered_image)
......@@ -169,30 +170,84 @@ def create_testable_images(images, selected_channel, quantity_of_images):
    # average_image = Image.fromarray(average_image)
    sftp_client.close()
def save_new_gauss():
    """
    Build a 2-D Gaussian kernel on a [-1, 1) grid (bivariate normal pdf),
    normalise it to sum to 1, and save it as gaussian_kernel.tiff.
    """
    # x,y = np.mgrid[-1:1:.003125, -1:1:.003125]
    x,y = np.mgrid[-1:1:.003911, -1:1:.003911]
    # print(x.shape)
    pos = np.dstack((x,y))
    # grid = np.zeros((l,l))
    # gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
    from scipy.stats import multivariate_normal
    # normal_grid = multivariate_normal.pdf(grid, mean = [0]*l, cov = [5]*l)
    normal_grid = multivariate_normal([0, 0], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)
    normal_grid = normal_grid/np.sum(normal_grid)
    end_image = Image.fromarray(normal_grid)
    # fig2 = plt.figure()
    # ax2 = fig2.add_subplot(111)
    # ax2.contourf(x, y, normal_grid)
    # plt.show()
    # print(np.sum(np.array(end_image)))
    end_image.save("gaussian_kernel.tiff")
def little_inverter(initial_matrix):
    n = initial_matrix.shape[0]
    initial_matrix = np.hstack((initial_matrix,np.zeros_like(initial_matrix)))
    print(initial_matrix.shape)
    initial_matrix = initial_matrix.tolist()
    # Build the augmented matrix [A | I]
    for i in range(n):
        for j in range(n):
            if i == j:
                initial_matrix[i][j+n] = 1
    # Apply Gauss-Jordan elimination
    for i in range(n):
        if initial_matrix[i][i] == 0.0:
            sys.exit('Divide by zero detected!')
        for j in range(n):
            if i != j:
                ratio = initial_matrix[j][i]/initial_matrix[i][i]
                for k in range(2*n):
                    initial_matrix[j][k] = initial_matrix[j][k] - ratio * initial_matrix[i][k]
    # Row operation to make each principal diagonal element equal to 1
    for i in range(n):
        divisor = initial_matrix[i][i]
        for j in range(2*n):
            initial_matrix[i][j] = initial_matrix[i][j]/divisor
    return np.array(initial_matrix)[:,n:]
if __name__ == "__main__":
    # save_new_average(350,"11")
    save_new_gauss()
    gaussian_kernel = np.array(Image.open("gaussian_kernel.tiff"))
    scenes = remote_file_extractor("/media/elphel/NVME/lwir16-proc/te0607/scenes/")
    images = remote_image_extractor(scenes)
    images = find_only_in_channel(images, "11")
    # average_image = np.array(Image.open("Average_On_Channel(" + "11" + ").tiff"))
    create_testable_images(images,"11",3)
    # create_testable_images(images,"11",6)
    # plt.imshow(color_adjust(average_image),cmap='gray',vmin = 0, vmax=1)
    # plt.show()
    # print(len(images))
    average_image = remote_create_average(images[10000 - 10: 10000 + 10], "11")
    print(len(images))
    average_image = remote_create_average(images[5000 - 10: 5000 + 10], "11")
    # plt.imshow(color_adjust((average_image - gaussian_filter(average_image,sigma=5))),cmap='gray',vmin = 0, vmax=1)
    # plt.show()
    sftp_client = setup_remote_sftpclient()
    print(len(images))
    # print(len(images))
    # print(images[10000])
    test_image = sftp_client.open(images[10000])
    test_image = sftp_client.open(images[4700])
    test_image = Image.open(test_image)
    test_image = np.array(test_image)[1:]
    # print(test_image)
    # plt.imshow(color_adjust(adjust_to_original(test_image,average_image)))
    # plt.show()
    plt.imshow(color_adjust(adjust_to_original(test_image,average_image)),cmap='gray',vmin = 0, vmax=1)
    plt.show()
    little_more_blurred = gaussian_kernel@adjust_to_original(test_image,average_image)
    print(little_inverter(gaussian_kernel)@gaussian_kernel)
    plt.imshow(color_adjust(little_inverter(gaussian_kernel)@little_more_blurred),cmap='gray',vmin = 0, vmax=1)
    plt.show()
    # newimage = Image.fromarray(test_image - average_image)
    # newimage.save("NoInterference.tiff")
......
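A note on the blur/unblur experiment above, given the commit message: the script blurs with a matrix product, gaussian_kernel @ image, and tries to undo it by inverting the kernel matrix with the hand-rolled Gauss-Jordan routine. Two hedged observations, not part of the commit: numpy.linalg.solve(gaussian_kernel, blurred) should give the same result as little_inverter(gaussian_kernel) @ blurred while using pivoting, so it does not require every diagonal element to stay nonzero; and if the blur were a true 2-D convolution with a Gaussian point-spread function, the usual way to deconvolve is a regularised inverse in the Fourier domain. A minimal sketch of the latter, with illustrative names and an assumed circular-convolution model:

import numpy as np

def fft_deconvolve(blurred, psf, eps=1e-3):
    # Regularised (Wiener-style) inverse filter for blurred = original (*) psf.
    H = np.fft.fft2(psf, s=blurred.shape)  # transfer function of the blur
    B = np.fft.fft2(blurred)
    # conj(H) / (|H|^2 + eps) damps the frequencies that H nearly zeroes out
    X = B * np.conj(H) / (np.abs(H) ** 2 + eps)
    return np.real(np.fft.ifft2(X))

scipy.signal.fftconvolve can produce the matching forward blur, and skimage.restoration (e.g. richardson_lucy) offers ready-made deconvolution if a library routine is preferred.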
......@@ -2,18 +2,14 @@
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 41,
"metadata": {},
"outputs": [
{
"ename": "SystemError",
"evalue": "<built-in function imwrite> returned NULL without setting an error",
"output_type": "error",
"traceback": [
"---------------------------------------------------------------------------",
"SystemError                               Traceback (most recent call last)",
"/home/bryce/git/master/VideoToImage.ipynb Cell 1' in <cell line: 5>()",
"      4 count = 0",
"      5 while success and count < 5:",
"----> 6 cv2.imwrite(\"betterimages/innerfolder/fram%d.jpg\" % count, image, 0)",
"      7 success, image = vidcap.read()",
"      8 count += 1",
"SystemError: <built-in function imwrite> returned NULL without setting an error"
"name": "stdout",
"output_type": "stream",
"text": [
"1190\n"
]
}
],
......@@ -22,44 +18,153 @@
"vidcap = cv2.VideoCapture(\"concat_mono-fg_realtime.mp4\")\n",
"success,image = vidcap.read()\n",
"count = 0\n",
"while success and count < 5:\n",
" cv2.imwrite(\"betterimages/innerfolder/fram%d.jpg\" % count, image, 0)\n",
"while success :\n",
" grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
" if count < 926 and count >= 920:\n",
" cv2.imwrite(\"betterimages/innerfolder/fram%d.jpg\" % count, grayimage)\n",
" success, image = vidcap.read()\n",
" count += 1"
" count += 1\n",
"print(count)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 42,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(512, 640, 3)\n",
"(512, 640, 3)\n",
"(512, 640, 3)\n",
"(512, 640, 3)\n",
"(512, 640, 3)\n"
"(512, 640)\n",
"(512, 640)\n",
"(512, 640)\n",
"(512, 640)\n",
"(512, 640)\n",
"(512, 640)\n"
]
}
],
"source": [
"from PIL import Image\n",
"import numpy as np\n",
"for i in range(5):\n",
" im = Image.open(\"betterimages/innerfolder/fram\" + str(i) + \".jpg\", )\n",
"for i in range(920,926):\n",
" im = Image.open(\"betterimages/innerfolder/fram\" + str(i) + \".jpg\")\n",
" print(np.array(im).shape)\n",
" im.save(\"betterimages/innerfolder/fram\" + str(i) + \".tiff\", 'TIFF')"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": []
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"591360\n",
"327680\n"
]
}
],
"source": [
"print(672*880)\n",
"print(512*640)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.5541125541125541"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"327680/591360"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.421875"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"27/64"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(512, 512)\n",
"(512, 640)\n",
"(512, 640)\n"
]
}
],
"source": [
"im = Image.open(\"betterimages/innerfolder/fram\" + str(i) + \".jpg\")\n",
"def create_gaussian(l=5, sig=1.):\n",
" \"\"\"\n",
" creates gaussian kernel with side length `l` and a sigma of `sig`\n",
" \"\"\"\n",
" ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)\n",
" gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))\n",
" kernel = np.outer(gauss, gauss)\n",
" return kernel / np.sum(kernel)\n",
"\n",
"kernel = create_gaussian(512)\n",
"print(kernel.shape)\n",
"im = np.array(im)\n",
"print(im.shape)\n",
"print((kernel@im).shape)"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"320.5"
]
},
"execution_count": 53,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"205120/640"
]
},
{
"cell_type": "code",
......
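A note on the last cell above: create_gaussian(512) @ im has the expected (512, 640) shape, but a matrix product is not the same operation as convolving the image with the Gaussian kernel. For comparison, a conventional blur of one of the frames written earlier in this notebook; the sigma and the 15x15 kernel size are illustrative choices, not values from the commit:

import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
from scipy.signal import fftconvolve

im = np.array(Image.open("betterimages/innerfolder/fram920.jpg"), dtype=np.float64)

# Route 1: direct Gaussian blur
blurred_direct = gaussian_filter(im, sigma=2)

# Route 2: explicit 2-D convolution with a small separable kernel
# (same construction as create_gaussian in the cell above, just smaller)
def small_gaussian(l=15, sig=2.0):
    ax = np.linspace(-(l - 1) / 2.0, (l - 1) / 2.0, l)
    gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
    kernel = np.outer(gauss, gauss)
    return kernel / np.sum(kernel)

blurred_conv = fftconvolve(im, small_gaussian(), mode="same")
print(blurred_direct.shape, blurred_conv.shape)  # both (512, 640)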
......@@ -514,7 +514,7 @@ def text_to_tiff(filename, list_dic, bins):
if __name__ == "__main__":
scenes = file_extractor("betterimages")
scenes = file_extractor("original_images(11)")
images = image_extractor(scenes)
newnamesforlater = []
list_dic, bins = make_dictionary(images, 4, False)
......