Switched to more modular structure

Thomas 2023-03-24 18:14:44 +01:00
parent 3901e5c0f8
commit e781b46ca5
4 changed files with 292 additions and 278 deletions

file_utility.py Normal file

@@ -0,0 +1,27 @@
from PIL import Image

def openImageList(paths, resize: bool = True):
    '''Opens every image in the given list of paths and returns the PIL images as a list.'''
    stack = []
    for path in paths:
        img = Image.open(path)
        if resize:
            img = img.resize((1280, 720))
        stack.append(img)
    return stack

def saveStacktoFile(stack, quality: int = 75):
    '''Saves the arrays in the stack returned by the get_exposure_stack function to numbered JPEG files.'''
    print("Saving..")
    i = 0
    paths = []
    for array in stack:
        i += 1
        image = Image.fromarray(array)
        image.save(f"image_nr{i}.jpg", quality=quality)
        paths.append(f"image_nr{i}.jpg")
    return paths

def saveResultToFile(hdr_image, output_path: str = '/', quality: int = 75):
    '''Saves the fused HDR result as a single JPEG at <output_path>_hdr.jpg.'''
    image = Image.fromarray(hdr_image)
    image.save(f"{output_path}_hdr.jpg", quality=quality)

main.py

@@ -1,9 +1,16 @@
-import numpyHDR
-#Testfile
-hdr = numpyHDR.NumpyHDR()
-liste = ['test_hdr0.jpg', 'test_hdr1.jpg', 'test_hdr2.jpg']
-hdr.input_image = liste
-hdr.output_path = 'hdr/fused_merten17'
-hdr.compress_quality = 75
-hdr.sequence(1, 1, 1, True)
+import numpyHDR as hdr
+import picamburst as pcb
+import file_utility as file
+
+'''Example of a complete HDR process starting with the raspicam.'''
+
+# Get the exposure sequence from the raspicam
+stack = pcb.get_exposure_stack()
+# Process HDR with Mertens fusion and post effects
+result = hdr.process(stack, 1, 1, 1, True)
+# Save the result to file
+file.saveResultToFile(result, 'hdr/', 75)

numpyHDR.py

@@ -1,12 +1,9 @@
 import numpy as np
 from PIL import Image
 #import matplotlib.pyplot as plt
-class NumpyHDR:
-    '''Numpy and PIL implementation of a Mertens Fusion algorithm
-    Usage: Instantiate then set attributes:
+'''Numpy and PIL implementation of a Mertens Fusion algorithm
+Usage: Instantiate then set attributes:
     input_image = List containing path strings including the .jpg extension
     output_path = String of the output path without the .jpg ending
     compress_quality = 0-100 JPEG compression level, defaults to 75
@@ -20,32 +17,10 @@ class NumpyHDR:
     hdr.compress_quality = 50
     hdr.output_path = photos/result/
     hdr.sequence()
     returns: Nothing
 '''
-    def __init__(self):
-        self.input_image: list = []
-        self.output_path: str = '/'
-        self.compress_quality: int = 75
-
-    def plot_histogram(self, image, title="Histogram", bins=256):
-        """Plot the histogram of an image.
-        Args:
-            image: A numpy array representing an image.
-            title: The title of the plot.
-            bins: The number of bins in the histogram.
-        """
-        fig, ax = plt.subplots()
-        ax.hist(image.ravel(), bins=bins, color='gray', alpha=0.7)
-        ax.set_title(title)
-        ax.set_xlabel('Pixel value')
-        ax.set_ylabel('Frequency')
-        plt.show()
-
-    ### Experimental functions above this line. chatGPT sketches
-    def simple_clip(self, fused, gamma):
+def simple_clip(fused, gamma):
     # Apply gamma correction
     #fused = np.clip(fused, 0, 1)
     fused = np.power(fused, 1.0 / gamma)
@@ -54,8 +29,7 @@ class NumpyHDR:
     #fused = Image.fromarray(fused)
     return fused

-    def convolve2d(self, image, kernel):
+def convolve2d(image, kernel):
     """Perform a 2D convolution on the given image with the given kernel.
     Args:
@@ -95,7 +69,7 @@ class NumpyHDR:
     return convolved_image

-    def mask(self, img, center=50, width=20, threshold=0.2):
+def mask(img, center=50, width=20, threshold=0.2):
     '''Mask with sigmoid smoothing'''
     mask = 1 / (1 + np.exp((center - img) / width))  # Smooth gradient mask
     mask = np.where(img > threshold, mask, 1)  # Apply threshold to the mask
@@ -103,7 +77,7 @@ class NumpyHDR:
     #plot_histogram(mask, title="mask")
     return mask

-    def highlightsdrop(self, img, center=0.7, width=0.2, threshold=0.6, amount=0.08):
+def highlightsdrop(img, center=0.7, width=0.2, threshold=0.6, amount=0.08):
     '''Mask with sigmoid smoothing, targets bright sections'''
     mask = 1 / (1 + np.exp((center - img) / width))  # Smooth gradient mask
     mask = np.where(img > threshold, mask, 0)  # Apply threshold to the mask
@@ -114,7 +88,7 @@ class NumpyHDR:
     return img_adjusted

-    def shadowlift(self, img, center=0.2, width=0.1, threshold=0.2, amount=0.05):
+def shadowlift(img, center=0.2, width=0.1, threshold=0.2, amount=0.05):
     '''Mask with sigmoid smoothing, targets dark sections'''
     mask = 1 / (1 + np.exp((center - img) / width))  # Smooth gradient mask
     mask = np.where(img < threshold, mask, 0)  # Apply threshold to the mask
@@ -125,7 +99,7 @@ class NumpyHDR:
     return img_adjusted

-    def mertens_fusion(self, image_paths, gamma=2.2, contrast_weight=0.2):
+def mertens_fusion(stack, gamma=1, contrast_weight=1):
     """Fuse multiple exposures into a single HDR image using the Mertens algorithm.
     Args:
@@ -136,14 +110,11 @@ class NumpyHDR:
     Returns:
         The fused HDR image.
     """
-    # Load the input images and convert them to floating-point format.
-    images = []
-    for path in image_paths:
-        img = Image.open(path)
-        img = img.resize((1280, 720))
-        img = np.array(img).astype(np.float32) / 255.0
-        img = np.power(img, gamma)
+    images = []
+    for array in stack:
+        img = np.array(array).astype(np.float32) / 255.0
+        img = np.power(img, gamma)
         images.append(img)

     # Compute the weight maps for each input image based on the local contrast.
@@ -152,7 +123,7 @@ class NumpyHDR:
     for img in images:
         gray = np.dot(img, [0.2989, 0.5870, 0.1140])
         kernel = np.array([[-1, -1, -1], [-1, 7, -1], [-1, -1, -1]])
-        laplacian = np.abs(self.convolve2d(gray, kernel))
+        laplacian = np.abs(convolve2d(gray, kernel))
         weight = np.power(laplacian, contrast_weight)
         weight_maps.append(weight)
@@ -168,7 +139,8 @@ class NumpyHDR:
     return fused

-    def compress_dynamic_range(self, image):
+def compress_dynamic_range(image):
+    '''Compress the dynamic range based on percentiles'''
     # Find the 1st and 99th percentiles of the image
     p1, p99 = np.percentile(image, (0, 99))
@@ -183,7 +155,7 @@ class NumpyHDR:
     return new_image

-    def compress_dynamic_range_histo(self, image, new_min=0.01, new_max=0.99):
+def compress_dynamic_range_histo(image, new_min=0.01, new_max=0.99):
     """Compress the dynamic range of an image using histogram stretching.
     Args:
@@ -208,37 +180,39 @@ class NumpyHDR:
     return new_image

-    def open_image(filename):
-        # Open the image file in binary mode
-        with open(filename, 'rb') as f:
-            # Read the binary data from the file
-            binary_data = f.read()
-
-        # Convert the binary data to a 1D numpy array of uint8 type
-        image_array = np.frombuffer(binary_data, dtype=np.uint8)
-
-        # Reshape the 1D array into a 2D array with the correct image shape
-        # (Assuming a 3-channel RGB image with shape (height, width))
-        height = int.from_bytes(binary_data[16:20], byteorder='big')
-        width = int.from_bytes(binary_data[20:24], byteorder='big')
-        image_array = image_array[24:].reshape((height, width, 3))
-        return image_array
-
-    def sequence(self, gain: float = 0.8, weight: float = 0.5, gamma: float = 1, post: bool = True):
-        '''gain setting: the higher the darker, good range from 0.4-1.0'''
-        print(self.input_image)
-        hdr_image = self.mertens_fusion(self.input_image, gain, weight)
-        if post == True:
-            #hdr_image = self.highlightsdrop(hdr_image)
-            hdr_image = self.shadowlift(hdr_image)
-            hdr_image = self.compress_dynamic_range(hdr_image)
-            #hdr_image = self.compress_dynamic_range_histo(hdr_image)
-        hdr_image = self.simple_clip(hdr_image, gamma)
-        image = Image.fromarray(hdr_image)
-        image.save(f"{self.output_path}_hdr.jpg", quality=self.compress_quality)
+def process(stack, gain: float = 1, weight: float = 1, gamma: float = 1, post: bool = True):
+    '''Processes the stack (a list of arrays from the camera) into a PIL-compatible, clipped output array.
+    Args:
+        stack : input list with arrays
+        gain  : low values give low contrast, high values give high contrast and brightness
+        weight: how strongly the extracted portions of each image are applied to the result ("HDR effect intensity")
+        gamma : post-fusion adjustment of the gamma
+        post  : enable or disable the effects applied after the fusion (True or False), default True
+            shadowlift: slightly lifts the shadows
+                center: position of the filter dropoff
+                width: range of the gradient, softness
+                threshold: threshold from 0 to 1, 0.1 = lowest blacks
+                amount: how much the shadows should be lifted; values under 0.1 seem to work well
+                returns: HDR image with lifted blacks, clipped to the 0-1 range
+            compress dynamic range:
+                tries to fit the image better into the available range, for a less flat, log-looking image
+    Returns:
+        HDR image as a PIL-compatible array.
+    '''
+    hdr_image = mertens_fusion(stack, gain, weight)
+    if post:
+        #hdr_image = highlightsdrop(hdr_image)
+        hdr_image = shadowlift(hdr_image)
+        hdr_image = compress_dynamic_range(hdr_image)
+        #hdr_image = compress_dynamic_range_histo(hdr_image)
+    hdr_image = simple_clip(hdr_image, gamma)
+    return hdr_image

picamburst.py

@@ -13,60 +13,66 @@ picam2.set_controls({"AwbEnable": 1})
 picam2.set_controls({"AeEnable": 1})
 picam2.set_controls({"AfMode": controls.AfModeEnum.Manual })
 picam2.set_controls({"LensPosition": 0.0 })
-#picam2.set_controls({"AnalogueGain": 1.0})
-picam2.start()
-time.sleep(1)
-print(picam2.capture_metadata())
-start = picam2.capture_metadata()
-exposure_start = start["ExposureTime"]
-gain_start = start["AnalogueGain"]
-picam2.set_controls({"AeEnable": 0})
-confirmed = picam2.capture_metadata()["AeLocked"]
-while confirmed != True:
+def get_exposure_stack(factor: int = 2):
+    '''Returns a list with arrays that contain different exposures controlled by the factor.
+    The automatically set exposure of the first frame is saved and multiplied or divided
+    by the factor to get the over- and under-exposed frames.'''
+    picam2.start()
+    time.sleep(1)
+    print(picam2.capture_metadata())
+    start = picam2.capture_metadata()
+    exposure_start = start["ExposureTime"]
+    gain_start = start["AnalogueGain"]
+    picam2.set_controls({"AeEnable": 0})
+    confirmed = picam2.capture_metadata()["AeLocked"]
+    while not confirmed:
         confirmed = picam2.capture_metadata()["AeLocked"]
         time.sleep(.1)
     picam2.set_controls({"AnalogueGain": gain_start})
     confirmed = picam2.capture_metadata()["AnalogueGain"]
-    while confirmed != gain_start in range(gain_start -0.1, gain_start +0.1):
-        confimed = picam2.capture_metadata()["AnalogueGain"]
+    # Wait until the reported gain is within +/-0.1 of the locked starting gain.
+    while not (gain_start - 0.1 <= confirmed <= gain_start + 0.1):
+        confirmed = picam2.capture_metadata()["AnalogueGain"]
         time.sleep(.1)
     ev1 = picam2.capture_array()
     #print("Picture one is done")
-    ev_low = int(exposure_start / 2)
+    ev_low = int(exposure_start / factor)
     picam2.set_controls({"ExposureTime": ev_low})
     confirmed = picam2.capture_metadata()["ExposureTime"]
     while confirmed not in range(ev_low - 100, ev_low + 100):
         confirmed = picam2.capture_metadata()["ExposureTime"]
         time.sleep(.01)
     #print("2", confirmed)
     ev2 = picam2.capture_array()
     #print("Picture 2 is captured to array")
-    ev_high = int(exposure_start * 2)
+    ev_high = int(exposure_start * factor)
     picam2.set_controls({"ExposureTime": ev_high})
     confirmed = picam2.capture_metadata()["ExposureTime"]
     while confirmed not in range(ev_high - 100, ev_high + 100):
         confirmed = picam2.capture_metadata()["ExposureTime"]
         time.sleep(.01)
     #print("3", confirmed)
     ev3 = picam2.capture_array()
     #print("Picture 3 is captured")
-print("Saving..")
-image = Image.fromarray(ev1)
-image.save(f"test_hdr0.jpg", quality=50)
-image = Image.fromarray(ev2)
-image.save(f"test_hdr1.jpg", quality=50)
-image = Image.fromarray(ev3)
-image.save(f"test_hdr2.jpg", quality=50)
-picam2.stop()
+    picam2.stop()
+    stack = [ev1, ev2, ev3]
+    return stack
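
To make the bracketing concrete, a small sketch of the factor parameter; the 10000 µs metered exposure is an assumed example value, and the call again requires a Raspberry Pi camera:

import picamburst as pcb

# With a metered ExposureTime of 10000 µs and factor=2, the three frames are taken
# at roughly 10000 µs (auto), 5000 µs (under-exposed) and 20000 µs (over-exposed).
stack = pcb.get_exposure_stack(factor=2)
print(len(stack))  # 3 arrays, ready for numpyHDR.process()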