From df57f91beb9b3dde490f822a597af10999e62288 Mon Sep 17 00:00:00 2001 From: matthewverde Date: Fri, 14 Jun 2024 20:00:01 -0700 Subject: [PATCH 1/9] mattg: edits to allow rpicam_file example to work on hard coded values, and draw image to a canvas --- examples/main.py | 18 ++++++++ examples/rpicam_file.py | 98 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 109 insertions(+), 7 deletions(-) create mode 100644 examples/main.py diff --git a/examples/main.py b/examples/main.py new file mode 100644 index 0000000..3821047 --- /dev/null +++ b/examples/main.py @@ -0,0 +1,18 @@ +import subprocess +from threading import Thread +import datetime +import os + +def record() { + recordingTime = '1000' + framerate = '15' + gain = '5.0' + filename = 'image' + + now = datetime.datetime.now() + os.mkdir(now) + cwd = os.getcwd() + outputLocation = os.path.join(cwd, f'{now}/{filename}%05d.raw') + + process = subprocess.Popen(['rpicam-raw', f'-t {recordingTime}', '--segment 1', f'-o {outputLocation}', f'--framerate {framerate}', f'--gain {gain}'], stdout=subprocess.PIPE, stderr=subprocess.PIPE +} \ No newline at end of file diff --git a/examples/rpicam_file.py b/examples/rpicam_file.py index 05e0ecd..85eefca 100644 --- a/examples/rpicam_file.py +++ b/examples/rpicam_file.py @@ -1,27 +1,111 @@ from pidng.core import RPICAM2DNG, DNGTags, Tag from pidng.camdefs import * import numpy as np +import tkinter +from tkinter import * + +def getRGBHex(value, color): + hexVal = hex(value)[2:] + + if(len(hexVal) == 0): + hexVal = '000' + if(len(hexVal) == 1): + hexVal = f'00{hexVal}' + if(len(hexVal) == 2): + hexVal = f'0{hexVal}' + + if(color == 'r'): + return f'#{hexVal}000000' + if(color == 'g'): + return f'#000{hexVal}000' + if(color == 'b'): + return f'#000000{hexVal}' + +master = Tk() +w = Canvas(master, width=1700, height=800) +w.pack() +canvas_height=3040 +canvas_width=4056 +y = int(canvas_height / 2) +for i in range(0, 100): + w.create_line(0, i, 200, i, fill=getRGBHex(900, 'g')) # load raw image data from file into numpy array. RAW frame from HQ camera. -img = "hq_camera_frame.raw" +img = '../raw-test-full-sensor/test00000.raw' data = np.fromfile(img, dtype=np.uint8) +# file size 4669440 +# (1520 height) * (((2028 width * 12 bits per pixel) / 8 bits per byte) + 30 extra bytes*) = 4669440 bytes +# (((2028 * 12) / 8) + 30) = 3072 +# bytes*: strolls with dog claims it should only by 28 extra bytes https://www.strollswithmydog.com/open-raspberry-pi-high-quality-camera-raw/ +# tuple is in the form row, col +# data = data.reshape((1520, 3072)) +data = data.reshape((3040, 6112)) +# for i in range(6112 - 50, 6112): +# print("{:08b}".format(data[1][i])) + +for i in range(0, 12): + print("{:08b}".format(data[0][i])) # choose a predefined camera model, set the sensor mode and bayer layout. # this camera model class sets the appropriate DNG's tags needed based on the camera sensor. 
( needed for bit unpacking, color matrices ) -camera = RaspberryPiHqCamera(1, CFAPattern.BGGR) +camera = RaspberryPiHqCamera(3, CFAPattern.BGGR) + +# if self.mode == 1: +# width = 2028 +# height = 1080 +# if self.mode == 2: +# width = 2028 +# height = 1520 +# if self.mode == 3: +# width = 4056 +# height = 3040 +# if self.mode == 4: +# width = 1012 +# height = 760 # example of adding custom DNG tags to predefined tags from camera model camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 -camera.tags.set(Tag.ExposureTime, [[1,400]]) # SHUTTER 1/400 -camera.tags.set(Tag.PhotographicSensitivity, [400]) # ISO 400 +camera.tags.set(Tag.ExposureTime, [[1,300]]) # SHUTTER 1/400 +camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 +camera.fmt = dict({ + # tuple is in the form width height + 'size': (4056,3040), + 'bpp': 12, + 'format': 'SRGGB12_CSI2P' +}) +# 4095 # example of passing a filter over the rawframe before it is saved. This will simply print the dimensions of the raw image. def print_dimensions(rawFrame): - print(rawFrame.shape) + height, width = rawFrame.shape + for x in range(0, 1700): # width + for y in range(0, 800): # height + if(y % 2 == 0): + if(x % 2 == 0): # blue + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'b')) + else: # green + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'g')) + else: + if(x % 2 == 0): # green + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'g')) + else: # red + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'r')) + + print(f'width: {width}') + print(f'height {height}') + # print(rawFrame[0,0]) return rawFrame # pass camera reference into the converter. r = RPICAM2DNG(camera) -r.options(path="", compress=True) +r.options(path="C:/Users/matth/code/PiDNG/raw-test-full-sensor", compress=False) r.filter = print_dimensions -r.convert(data, filename="output") +r.convert(data, filename="./full-sensor-test") +unpackedPixel = r.__unpack_pixels__(data=data) +# print('first pixel') +# print(unpackedPixel[0,0]) +# print('second pixel') +# print(unpackedPixel[0,1]) +# print('third pixel') +# print(unpackedPixel[0,2]) +mainloop() From 94977d6e928df0c7530fffe27b146103b54c89db Mon Sep 17 00:00:00 2001 From: matthewverde Date: Sat, 15 Jun 2024 14:13:02 -0700 Subject: [PATCH 2/9] mattg: expand on the recording wrapper --- examples/main.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/examples/main.py b/examples/main.py index 3821047..723a431 100644 --- a/examples/main.py +++ b/examples/main.py @@ -3,16 +3,21 @@ import datetime import os -def record() { +DEFAULT_WRITE_LOCATION = '/media/matthewverde/RAW/' + +def record(): recordingTime = '1000' framerate = '15' gain = '5.0' filename = 'image' now = datetime.datetime.now() - os.mkdir(now) - cwd = os.getcwd() - outputLocation = os.path.join(cwd, f'{now}/{filename}%05d.raw') + newDirName = f'rpicam-raw-{now.day}-{now.month}-{now.year}--{now.hour}.{now.minute}.{now.second}' + newDirLocation = os.path.join(DEFAULT_WRITE_LOCATION, newDirName) + os.mkdir(newDirLocation) + outputLocation = os.path.join(newDirLocation, f'{filename}%05d.raw') + + process = subprocess.run(['rpicam-raw', '-t', recordingTime, '--segment', '1', '-o', outputLocation, '--framerate', framerate, '--gain', gain], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + print(process) - process = subprocess.Popen(['rpicam-raw', f'-t {recordingTime}', '--segment 1', f'-o {outputLocation}', f'--framerate {framerate}', f'--gain {gain}'], stdout=subprocess.PIPE, 
stderr=subprocess.PIPE -} \ No newline at end of file +record() \ No newline at end of file From 1b11085e8accbb63fe3c545592a47e6a3c8fef12 Mon Sep 17 00:00:00 2001 From: matthewverde Date: Mon, 17 Jun 2024 16:45:16 -0700 Subject: [PATCH 3/9] mattg: add script for switching B+G pixels to value of nearest red neighbor, and separate canvas code into different script --- examples/rpicam_bg_red_inheritance.py | 74 +++++++++++++++++++++ examples/rpicam_draw_to_canvas.py | 94 +++++++++++++++++++++++++++ examples/rpicam_file.py | 64 ------------------ 3 files changed, 168 insertions(+), 64 deletions(-) create mode 100644 examples/rpicam_bg_red_inheritance.py create mode 100644 examples/rpicam_draw_to_canvas.py diff --git a/examples/rpicam_bg_red_inheritance.py b/examples/rpicam_bg_red_inheritance.py new file mode 100644 index 0000000..78b4340 --- /dev/null +++ b/examples/rpicam_bg_red_inheritance.py @@ -0,0 +1,74 @@ +from pidng.core import RPICAM2DNG, DNGTags, Tag +from pidng.camdefs import * +import numpy as np + +# load raw image data from file into numpy array. RAW frame from HQ camera. +img = '../raw-test-full-sensor/test00000.raw' +data = np.fromfile(img, dtype=np.uint8) +# file size 4669440 +# (1520 height) * (((2028 width * 12 bits per pixel) / 8 bits per byte) + 30 extra bytes*) = 4669440 bytes +# (((2028 * 12) / 8) + 30) = 3072 +# bytes*: strolls with dog claims it should only by 28 extra bytes https://www.strollswithmydog.com/open-raspberry-pi-high-quality-camera-raw/ +# tuple is in the form row, col +# data = data.reshape((1520, 3072)) +data = data.reshape((3040, 6112)) + +# choose a predefined camera model, set the sensor mode and bayer layout. +# this camera model class sets the appropriate DNG's tags needed based on the camera sensor. ( needed for bit unpacking, color matrices ) +# camera = RaspberryPiHqCamera(3, CFAPattern.BGGR) +# [0, 0, 0, 0] is equal to a RRRR monochrome filter +# in order for this to work, we need to rewrite the blue and green pixels +camera = RaspberryPiHqCamera(3, [0, 0, 0, 0]) + +# if self.mode == 1: +# width = 2028 +# height = 1080 +# if self.mode == 2: +# width = 2028 +# height = 1520 +# if self.mode == 3: +# width = 4056 +# height = 3040 +# if self.mode == 4: +# width = 1012 +# height = 760 + +# example of adding custom DNG tags to predefined tags from camera model +camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 +camera.tags.set(Tag.ExposureTime, [[1,300]]) # SHUTTER 1/400 +camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 +camera.fmt = dict({ + # tuple is in the form width height + 'size': (4056,3040), + 'bpp': 12, + 'format': 'SRGGB12_CSI2P' +}) + +def redShiftFilter(rawFrame): + height, width = rawFrame.shape + for x in range(0, width): # width + for y in range(0, height): # height + if(y % 2 == 0): + if(x % 2 == 0): # blue + # get nearest red + targetRedX = x + 1 + targetRedY = y + 1 + else: # green + targetRedX = x + targetRedY = y + 1 + else: + if(x % 2 == 0): # green + targetRedX = x + 1 + targetRedY = y + else: # red + targetRedX = x + targetRedY = y + rawFrame[y][x] = rawFrame[targetRedY][targetRedX] + return rawFrame + +# pass camera reference into the converter. 
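# --- editor's sketch, not part of the patch ---
# The pure-Python double loop in redShiftFilter above visits all ~12.3 million pixels of a
# mode-3 frame and is very slow. Assuming the same layout the loop encodes (red sites at odd
# row / odd column), a hypothetical vectorized equivalent using NumPy slice assignment is:
def red_shift_filter_vectorized(rawFrame):
    red = rawFrame[1::2, 1::2]      # red sites: untouched, used as the source
    rawFrame[0::2, 0::2] = red      # blue sites inherit the red at (y+1, x+1)
    rawFrame[0::2, 1::2] = red      # green sites on blue rows inherit the red at (y+1, x)
    rawFrame[1::2, 0::2] = red      # green sites on red rows inherit the red at (y, x+1)
    return rawFrame
# --- end of sketch ---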
+r = RPICAM2DNG(camera) +r.options(path="C:/Users/matth/code/PiDNG/raw-test-full-sensor", compress=False) +r.filter = redShiftFilter +r.convert(data, filename="./full-sensor-test-red-inheritance") +unpackedPixel = r.__unpack_pixels__(data=data) diff --git a/examples/rpicam_draw_to_canvas.py b/examples/rpicam_draw_to_canvas.py new file mode 100644 index 0000000..3bd7d62 --- /dev/null +++ b/examples/rpicam_draw_to_canvas.py @@ -0,0 +1,94 @@ +from pidng.core import RPICAM2DNG, DNGTags, Tag +from pidng.camdefs import * +import numpy as np +import tkinter +from tkinter import * + +def getRGBHex(value, color): + hexVal = hex(value)[2:] + + if(len(hexVal) == 0): + hexVal = '000' + if(len(hexVal) == 1): + hexVal = f'00{hexVal}' + if(len(hexVal) == 2): + hexVal = f'0{hexVal}' + + if(color == 'r'): + return f'#{hexVal}000000' + if(color == 'g'): + return f'#000{hexVal}000' + if(color == 'b'): + return f'#000000{hexVal}' + +master = Tk() +w = Canvas(master, width=1700, height=800) +w.pack() +canvas_height=3040 +canvas_width=4056 + +# load raw image data from file into numpy array. RAW frame from HQ camera. +img = '../raw-test-full-sensor/test00000.raw' +data = np.fromfile(img, dtype=np.uint8) +# file size 4669440 +# (1520 height) * (((2028 width * 12 bits per pixel) / 8 bits per byte) + 30 extra bytes*) = 4669440 bytes +# (((2028 * 12) / 8) + 30) = 3072 +# bytes*: strolls with dog claims it should only by 28 extra bytes https://www.strollswithmydog.com/open-raspberry-pi-high-quality-camera-raw/ +# tuple is in the form row, col +# data = data.reshape((1520, 3072)) +data = data.reshape((3040, 6112)) + +# choose a predefined camera model, set the sensor mode and bayer layout. +# this camera model class sets the appropriate DNG's tags needed based on the camera sensor. ( needed for bit unpacking, color matrices ) +camera = RaspberryPiHqCamera(3, CFAPattern.BGGR) + +# if self.mode == 1: +# width = 2028 +# height = 1080 +# if self.mode == 2: +# width = 2028 +# height = 1520 +# if self.mode == 3: +# width = 4056 +# height = 3040 +# if self.mode == 4: +# width = 1012 +# height = 760 + +# example of adding custom DNG tags to predefined tags from camera model +camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 +camera.tags.set(Tag.ExposureTime, [[1,300]]) # SHUTTER 1/400 +camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 +camera.fmt = dict({ + # tuple is in the form width height + 'size': (4056,3040), + 'bpp': 12, + 'format': 'SRGGB12_CSI2P' +}) + +# 4095 +# example of passing a filter over the rawframe before it is saved. This will simply print the dimensions of the raw image. +def drawCanvasFilter(rawFrame): + height, width = rawFrame.shape + for x in range(0, 600): # width + for y in range(0, 400): # height + if(y % 2 == 0): + if(x % 2 == 0): # blue + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'b')) + else: # green + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'g')) + else: + if(x % 2 == 0): # green + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'g')) + else: # red + w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'r')) + return rawFrame + +# pass camera reference into the converter. 
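# --- editor's sketch, not part of the patch ---
# drawCanvasFilter issues one create_line call per pixel, which works for the small crop above
# but scales poorly. Since later scripts in this series already use matplotlib, a quicker way to
# eyeball the frame is imshow on a single Bayer plane; preview_frame is a hypothetical helper and
# assumes rawFrame is the unpacked uint16 array the filter receives:
def preview_frame(rawFrame):
    import matplotlib.pyplot as plt
    plt.imshow(rawFrame[1::2, 1::2], cmap='gray', vmin=0, vmax=4095)  # red plane of the BGGR mosaic
    plt.title('red Bayer plane (12-bit values)')
    plt.show()
    return rawFrame
# --- end of sketch ---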
+r = RPICAM2DNG(camera) +r.options(path="C:/Users/matth/code/PiDNG/raw-test-full-sensor", compress=False) +r.filter = drawCanvasFilter +r.convert(data, filename="./full-sensor-test-canvas") +unpackedPixel = r.__unpack_pixels__(data=data) + +mainloop() diff --git a/examples/rpicam_file.py b/examples/rpicam_file.py index 85eefca..fccc594 100644 --- a/examples/rpicam_file.py +++ b/examples/rpicam_file.py @@ -1,34 +1,6 @@ from pidng.core import RPICAM2DNG, DNGTags, Tag from pidng.camdefs import * import numpy as np -import tkinter -from tkinter import * - -def getRGBHex(value, color): - hexVal = hex(value)[2:] - - if(len(hexVal) == 0): - hexVal = '000' - if(len(hexVal) == 1): - hexVal = f'00{hexVal}' - if(len(hexVal) == 2): - hexVal = f'0{hexVal}' - - if(color == 'r'): - return f'#{hexVal}000000' - if(color == 'g'): - return f'#000{hexVal}000' - if(color == 'b'): - return f'#000000{hexVal}' - -master = Tk() -w = Canvas(master, width=1700, height=800) -w.pack() -canvas_height=3040 -canvas_width=4056 -y = int(canvas_height / 2) -for i in range(0, 100): - w.create_line(0, i, 200, i, fill=getRGBHex(900, 'g')) # load raw image data from file into numpy array. RAW frame from HQ camera. img = '../raw-test-full-sensor/test00000.raw' @@ -40,11 +12,6 @@ def getRGBHex(value, color): # tuple is in the form row, col # data = data.reshape((1520, 3072)) data = data.reshape((3040, 6112)) -# for i in range(6112 - 50, 6112): -# print("{:08b}".format(data[1][i])) - -for i in range(0, 12): - print("{:08b}".format(data[0][i])) # choose a predefined camera model, set the sensor mode and bayer layout. # this camera model class sets the appropriate DNG's tags needed based on the camera sensor. ( needed for bit unpacking, color matrices ) @@ -74,38 +41,7 @@ def getRGBHex(value, color): 'format': 'SRGGB12_CSI2P' }) -# 4095 -# example of passing a filter over the rawframe before it is saved. This will simply print the dimensions of the raw image. -def print_dimensions(rawFrame): - height, width = rawFrame.shape - for x in range(0, 1700): # width - for y in range(0, 800): # height - if(y % 2 == 0): - if(x % 2 == 0): # blue - w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'b')) - else: # green - w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'g')) - else: - if(x % 2 == 0): # green - w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'g')) - else: # red - w.create_line(x, y, x+1, y, fill=getRGBHex(rawFrame[y][x], 'r')) - - print(f'width: {width}') - print(f'height {height}') - # print(rawFrame[0,0]) - return rawFrame - # pass camera reference into the converter. 
r = RPICAM2DNG(camera) r.options(path="C:/Users/matth/code/PiDNG/raw-test-full-sensor", compress=False) -r.filter = print_dimensions r.convert(data, filename="./full-sensor-test") -unpackedPixel = r.__unpack_pixels__(data=data) -# print('first pixel') -# print(unpackedPixel[0,0]) -# print('second pixel') -# print(unpackedPixel[0,1]) -# print('third pixel') -# print(unpackedPixel[0,2]) -mainloop() From 251606a97cbc0c977691e9a1b2f2fa7d64b7e976 Mon Sep 17 00:00:00 2001 From: matthewverde Date: Tue, 18 Jun 2024 18:00:23 -0700 Subject: [PATCH 4/9] mattg: add app for managing recording of rpicam and converting raw files --- examples/main.py | 173 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 159 insertions(+), 14 deletions(-) diff --git a/examples/main.py b/examples/main.py index 723a431..6829654 100644 --- a/examples/main.py +++ b/examples/main.py @@ -2,22 +2,167 @@ from threading import Thread import datetime import os +import cmd -DEFAULT_WRITE_LOCATION = '/media/matthewverde/RAW/' +from pidng.core import RPICAM2DNG, DNGTags, Tag +from pidng.camdefs import * +import numpy as np -def record(): - recordingTime = '1000' - framerate = '15' - gain = '5.0' - filename = 'image' +class BayerFilterValue: + B = 2 + G = 1 + R = 0 - now = datetime.datetime.now() - newDirName = f'rpicam-raw-{now.day}-{now.month}-{now.year}--{now.hour}.{now.minute}.{now.second}' - newDirLocation = os.path.join(DEFAULT_WRITE_LOCATION, newDirName) - os.mkdir(newDirLocation) - outputLocation = os.path.join(newDirLocation, f'{filename}%05d.raw') +class RpiCam(): + DEFAULT_WRITE_LOCATION = '/media/matthewverde/RAW/' + def __init__(self, shutterSpeed, analogGain, framerate, recordingTime): + self.shutterSpeed = shutterSpeed + self.analogGain = analogGain + self.framerate = framerate + self.recordingTime = recordingTime + self.outputLocation = self.DEFAULT_WRITE_LOCATION + self.rawFilePrefix = f'ss{self.shutterSpeed}gain{self.analogGain}---' + self.cameraName = 'imx477' + self.bit = '12' + self.format = 'SRGGB12_CSI2P' + self.setBayerFilter('BGGR') + self.setMode(3) - process = subprocess.run(['rpicam-raw', '-t', recordingTime, '--segment', '1', '-o', outputLocation, '--framerate', framerate, '--gain', gain], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - print(process) + def getParentDirectoryForRecording(self): + now = datetime.datetime.now() + newDirName = f'rpicam-raw-{now.day}-{now.month}-{now.year}--{now.hour}.{now.minute}.{now.second}' + newDirLocation = os.path.join(self.outputLocation, newDirName) + os.mkdir(newDirLocation) + return newDirLocation -record() \ No newline at end of file + def getRpiCamOutputFilePath(self, parentPath): + return os.path.join(parentPath, f'{self.rawFilePrefix}%05d.raw') + + def setBayerFilter(self, colorPattern): + pattern = [] + for color in colorPattern: + if color == 'B' or color == 'b': + pattern.append(BayerFilterValue.B) + elif color == 'G' or color == 'g': + pattern.append(BayerFilterValue.G) + elif color == 'R' or color == 'r': + pattern.append(BayerFilterValue.R) + self.bayerFilter = pattern + self.bayerFilterString = colorPattern + + def setMode(self, modeToSet): + if(modeToSet == 1): + self.mode = 1 + self.width = '2028' + self.height = '1080' + self.stride = '3072' + elif(modeToSet == 2): + self.mode = 2 + self.width = '2028' + self.height = '1520' + self.stride = '3072' + elif(modeToSet == 3): + self.mode = 3 + self.width = '4056' + self.height = '3040' + self.stride = '6112' + else: + print(f'mode but be the value 1, 2, or 3. 
Got: {modeToSet}') + + def printConfig(self): + print(f''' + *** Cam Config ***\n + camera name: {self.cameraName}\n + bayer filter: {self.bayerFilterString}\n + shutter speed: {self.shutterSpeed}\n + analog gain: {self.analogGain}\n + frame rate: {self.framerate}\n + recording time: {self.recordingTime}\n + size: {self.width}x{self.height}\n + mode: {self.mode}\n + bit: {self.bit} + ''') + + def record(self): + parentDir = self.getParentDirectoryForRecording() + rawOutputFilePath = self.getRpiCamOutputFilePath(parentDir) + processArguments = ['rpicam-raw', '-t', self.recordingTime, '--segment', '1', '-o', rawOutputFilePath, '--framerate', self.framerate, '--gain', self.analogGain, '--shutter', self.shutterSpeed, '--width', self.width, '--height', self.height] + print('*** Starting Recording ***') + process = subprocess.run(processArguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + print('*** DONE ***') + print('*** Converting raw files to dng ***') + dngConverter = PiDngManager(self) + dngConverter.convert(parentDir, True) + + + +class CommandHandler(cmd.Cmd): + prompt = ">>> " + cam = RpiCam('100', '1.0', '10', '1000') + + def __init__(self, cam): + super().__init__() + self.cam = cam + + def do_framerate(self, arg): + self.cam.framerate = arg + + def do_gain(self, arg): + self.cam.analogGain = arg + + def do_rec_time(self, arg): + self.cam.recordingTime = arg + + def do_shutter_speed(self, arg): + self.cam.shutterSpeed = arg + + def do_config(self, arg): + self.cam.printConfig() + + def do_rec(self, arg): + self.cam.record() + + def do_mode(self, arg): + self.cam.setMode(int(arg)) + + def do_filter(self, arg): + self.cam.setBayerFilter(arg) + + def do_exit(self, arg): + return True + + + +class PiDngManager(): + def __init__(self, cam): + self.cam = cam + + def convert(self, dirToConvert, convertAll): + camera = RaspberryPiHqCamera(self.cam.mode, self.cam.bayerFilter) + camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 + camera.tags.set(Tag.ExposureTime, [[1,300]]) # SHUTTER 1/400 + camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 + camera.fmt = dict({ + # tuple is in the form width height + 'size': (int(self.cam.width), int(self.cam.height)), + 'bpp': int(self.cam.bit), + 'format': self.cam.format + }) + # pass camera reference into the converter. 
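# --- editor's sketch, not part of the patch ---
# The os.walk loop below converts every file it finds, so a second run over the same directory
# would also try to reshape the .dng files produced by the first run. A hypothetical helper that
# yields only the top-level .raw captures (os is already imported at the top of main.py):
def iter_raw_files(dirToConvert):
    for filename in sorted(os.listdir(dirToConvert)):
        if filename.endswith('.raw'):
            yield os.path.join(dirToConvert, filename)
# --- end of sketch ---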
+ r = RPICAM2DNG(camera) + r.options(path=dirToConvert, compress=False) + print(f'Converting raw files in {dirToConvert} to DNG') + for dirpath, dirnames, filenames in os.walk(dirToConvert): + if(convertAll): + for filename in filenames: + data = np.fromfile(os.path.join(dirToConvert, filename), dtype=np.uint8) + data = data.reshape((int(self.cam.height), int(self.cam.stride))) + r.convert(data, filename=os.path.join(dirToConvert, filename)) + print(f'Conversion Done') + +def main(): + cam = RpiCam('100', '1.0', '10', '1000') + commandHandler = CommandHandler(cam) + commandHandler.cmdloop() + +main() \ No newline at end of file From 5dc0febb9c97072f5965825bfe8ec3858c09d6ed Mon Sep 17 00:00:00 2001 From: matthewverde Date: Tue, 18 Jun 2024 23:23:59 -0700 Subject: [PATCH 5/9] Set reasonable default shutter speed, and RRRR converter, small raw file name tweaks --- examples/main.py | 62 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 9 deletions(-) diff --git a/examples/main.py b/examples/main.py index 6829654..744dd82 100644 --- a/examples/main.py +++ b/examples/main.py @@ -1,4 +1,5 @@ import subprocess +from pathlib import Path from threading import Thread import datetime import os @@ -21,7 +22,7 @@ def __init__(self, shutterSpeed, analogGain, framerate, recordingTime): self.framerate = framerate self.recordingTime = recordingTime self.outputLocation = self.DEFAULT_WRITE_LOCATION - self.rawFilePrefix = f'ss{self.shutterSpeed}gain{self.analogGain}---' + self.setRawFilePrefix() self.cameraName = 'imx477' self.bit = '12' self.format = 'SRGGB12_CSI2P' @@ -36,7 +37,7 @@ def getParentDirectoryForRecording(self): return newDirLocation def getRpiCamOutputFilePath(self, parentPath): - return os.path.join(parentPath, f'{self.rawFilePrefix}%05d.raw') + return os.path.join(parentPath, f'{self.bayerFilterString}{self.rawFilePrefix}%05d.raw') def setBayerFilter(self, colorPattern): pattern = [] @@ -50,6 +51,17 @@ def setBayerFilter(self, colorPattern): self.bayerFilter = pattern self.bayerFilterString = colorPattern + def setShutterSpeed(self, value): + self.shutterSpeed = value + self.setRawFilePrefix() + + def setGain(self, value): + self.analogGain = value + self.setRawFilePrefix() + + def setRawFilePrefix(self): + self.rawFilePrefix = f'ss{self.shutterSpeed}gain{self.analogGain}---' + def setMode(self, modeToSet): if(modeToSet == 1): self.mode = 1 @@ -87,9 +99,11 @@ def record(self): parentDir = self.getParentDirectoryForRecording() rawOutputFilePath = self.getRpiCamOutputFilePath(parentDir) processArguments = ['rpicam-raw', '-t', self.recordingTime, '--segment', '1', '-o', rawOutputFilePath, '--framerate', self.framerate, '--gain', self.analogGain, '--shutter', self.shutterSpeed, '--width', self.width, '--height', self.height] + # processArguments = ['rpicam-raw', '-t', self.recordingTime, '--segment', '1', '-o', rawOutputFilePath, '--framerate', self.framerate, '--gain', self.analogGain, '--width', self.width, '--height', self.height] print('*** Starting Recording ***') + print(f'cmd: {" ".join(processArguments)}') process = subprocess.run(processArguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - print('*** DONE ***') + print('*** Recording Finshed ***') print('*** Converting raw files to dng ***') dngConverter = PiDngManager(self) dngConverter.convert(parentDir, True) @@ -98,7 +112,6 @@ def record(self): class CommandHandler(cmd.Cmd): prompt = ">>> " - cam = RpiCam('100', '1.0', '10', '1000') def __init__(self, cam): super().__init__() @@ -108,13 +121,13 
@@ def do_framerate(self, arg): self.cam.framerate = arg def do_gain(self, arg): - self.cam.analogGain = arg + self.cam.setGain(arg) def do_rec_time(self, arg): self.cam.recordingTime = arg def do_shutter_speed(self, arg): - self.cam.shutterSpeed = arg + self.cam.setShutterSpeed(arg) def do_config(self, arg): self.cam.printConfig() @@ -151,17 +164,48 @@ def convert(self, dirToConvert, convertAll): # pass camera reference into the converter. r = RPICAM2DNG(camera) r.options(path=dirToConvert, compress=False) + if(self.cam.bayerFilter == [BayerFilterValue.R, BayerFilterValue.R, BayerFilterValue.R, BayerFilterValue.R]): + r.filter = self.redShiftFilter + else: + r.filter = self.noOpFilter print(f'Converting raw files in {dirToConvert} to DNG') for dirpath, dirnames, filenames in os.walk(dirToConvert): if(convertAll): for filename in filenames: + print(f'Converting: {filename} to DNG') + fileNameDng = Path(filename).with_suffix(".dng") data = np.fromfile(os.path.join(dirToConvert, filename), dtype=np.uint8) data = data.reshape((int(self.cam.height), int(self.cam.stride))) - r.convert(data, filename=os.path.join(dirToConvert, filename)) - print(f'Conversion Done') + r.convert(data, filename=os.path.join(dirToConvert, fileNameDng)) + + def noOpFilter(self, rawFrame): + return rawFrame + + def redShiftFilter(self, rawFrame): + height, width = rawFrame.shape + for x in range(0, width): # width + for y in range(0, height): # height + if(y % 2 == 0): + if(x % 2 == 0): # blue + # get nearest red + targetRedX = x + 1 + targetRedY = y + 1 + else: # green + targetRedX = x + targetRedY = y + 1 + else: + if(x % 2 == 0): # green + targetRedX = x + 1 + targetRedY = y + else: # red + targetRedX = x + targetRedY = y + rawFrame[y][x] = rawFrame[targetRedY][targetRedX] + return rawFrame def main(): - cam = RpiCam('100', '1.0', '10', '1000') + # shutterSpeed (microseconds), analogGain, framerate, recordingTime + cam = RpiCam('100000', '1.0', '10', '1000') commandHandler = CommandHandler(cam) commandHandler.cmdloop() From f78735f4f2153f177c2bef3a5d1f8950a1edc79a Mon Sep 17 00:00:00 2001 From: matthewverde Date: Wed, 19 Jun 2024 11:28:42 -0700 Subject: [PATCH 6/9] mattg: add more advanced red filter, add commands for not doing conversion --- examples/main.py | 53 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 43 insertions(+), 10 deletions(-) diff --git a/examples/main.py b/examples/main.py index 744dd82..8902dc3 100644 --- a/examples/main.py +++ b/examples/main.py @@ -28,6 +28,8 @@ def __init__(self, shutterSpeed, analogGain, framerate, recordingTime): self.format = 'SRGGB12_CSI2P' self.setBayerFilter('BGGR') self.setMode(3) + self.quickFilter = 't' + self.convertToDng = 't' def getParentDirectoryForRecording(self): now = datetime.datetime.now() @@ -99,14 +101,14 @@ def record(self): parentDir = self.getParentDirectoryForRecording() rawOutputFilePath = self.getRpiCamOutputFilePath(parentDir) processArguments = ['rpicam-raw', '-t', self.recordingTime, '--segment', '1', '-o', rawOutputFilePath, '--framerate', self.framerate, '--gain', self.analogGain, '--shutter', self.shutterSpeed, '--width', self.width, '--height', self.height] - # processArguments = ['rpicam-raw', '-t', self.recordingTime, '--segment', '1', '-o', rawOutputFilePath, '--framerate', self.framerate, '--gain', self.analogGain, '--width', self.width, '--height', self.height] print('*** Starting Recording ***') print(f'cmd: {" ".join(processArguments)}') process = subprocess.run(processArguments, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) print('*** Recording Finshed ***') - print('*** Converting raw files to dng ***') - dngConverter = PiDngManager(self) - dngConverter.convert(parentDir, True) + if(self.convertToDng == 't'): + print('*** Converting raw files to dng ***') + dngConverter = PiDngManager(self) + dngConverter.convert(parentDir, True) @@ -141,10 +143,16 @@ def do_mode(self, arg): def do_filter(self, arg): self.cam.setBayerFilter(arg) + def do_quick_filter(self, arg): + self.cam.quickFilter = arg + + def do_convert(self, arg): + self.cam.convertToDng = arg + def do_exit(self, arg): return True - +MICROSECONDS_PER_SECOND = 1000000 class PiDngManager(): def __init__(self, cam): @@ -152,9 +160,10 @@ def __init__(self, cam): def convert(self, dirToConvert, convertAll): camera = RaspberryPiHqCamera(self.cam.mode, self.cam.bayerFilter) - camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 - camera.tags.set(Tag.ExposureTime, [[1,300]]) # SHUTTER 1/400 - camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 + # camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 + # camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 + exposureTimeDenominator = int(int(self.cam.shutterSpeed) / MICROSECONDS_PER_SECOND) + camera.tags.set(Tag.ExposureTime, [[1,exposureTimeDenominator]]) camera.fmt = dict({ # tuple is in the form width height 'size': (int(self.cam.width), int(self.cam.height)), @@ -165,7 +174,11 @@ def convert(self, dirToConvert, convertAll): r = RPICAM2DNG(camera) r.options(path=dirToConvert, compress=False) if(self.cam.bayerFilter == [BayerFilterValue.R, BayerFilterValue.R, BayerFilterValue.R, BayerFilterValue.R]): - r.filter = self.redShiftFilter + if(self.cam.quickFilter != 't'): + # slow as shit + r.filter = self.medianRedShiftFilter + else: + r.filter = self.redShiftFilter else: r.filter = self.noOpFilter print(f'Converting raw files in {dirToConvert} to DNG') @@ -203,10 +216,30 @@ def redShiftFilter(self, rawFrame): rawFrame[y][x] = rawFrame[targetRedY][targetRedX] return rawFrame + # slow as shit + def medianRedShiftFilter(self, rawFrame): + height, width = rawFrame.shape + for col in range(1, width): # width + for row in range(1, height): # height + if(row % 2 == 0): + if(col % 2 == 0): # blue + # get nearest red + reds = [rawFrame[row - 1][col - 1], rawFrame[row - 1][col + 1], rawFrame[row + 1][col - 1], rawFrame[row + 1][col + 1]] + else: # green + reds = [rawFrame[row - 1][col], rawFrame[row + 1][col]] + else: + if(col % 2 == 0): # green + reds = [rawFrame[row][col - 1], rawFrame[row][col + 1]] + else: # red + reds = [rawFrame[row][col]] + average = int(sum(reds) / len(reds)) + rawFrame[row][col] = average + return rawFrame + def main(): # shutterSpeed (microseconds), analogGain, framerate, recordingTime cam = RpiCam('100000', '1.0', '10', '1000') commandHandler = CommandHandler(cam) commandHandler.cmdloop() -main() \ No newline at end of file +main() From fce0ddccba36220bc52390a8fa1d50b6586a57a3 Mon Sep 17 00:00:00 2001 From: matthewverde Date: Sat, 6 Jul 2024 20:09:22 -0700 Subject: [PATCH 7/9] mattg: add in too many changes --- examples/analyzePixels.py | 93 +++++++++++++++ examples/app.py | 62 ++++++++++ examples/main.py | 21 +++- examples/raw2dng.py | 8 +- examples/rpicam_bg_red_inheritance.py | 157 +++++++++++++++++++++++++- 5 files changed, 328 insertions(+), 13 deletions(-) create mode 100644 examples/analyzePixels.py create mode 100644 examples/app.py diff --git a/examples/analyzePixels.py b/examples/analyzePixels.py new file mode 100644 index 0000000..9d6ecc2 
--- /dev/null +++ b/examples/analyzePixels.py @@ -0,0 +1,93 @@ +from pidng.core import RPICAM2DNG, DNGTags, Tag +from pidng.camdefs import * +import numpy as np +import matplotlib.pyplot as plt + +# load raw image data from file into numpy array. RAW frame from HQ camera. +img = '../rpicam-raw-20-6-2024--13.0.24\BGGRss1200gain0.1---00000.raw' +data = np.fromfile(img, dtype=np.uint8) +# file size 4669440 +# (1520 height) * (((2028 width * 12 bits per pixel) / 8 bits per byte) + 30 extra bytes*) = 4669440 bytes +# (((2028 * 12) / 8) + 30) = 3072 +# bytes*: strolls with dog claims it should only by 28 extra bytes https://www.strollswithmydog.com/open-raspberry-pi-high-quality-camera-raw/ +# tuple is in the form row, col +# data = data.reshape((1520, 3072)) +data = data.reshape((3040, 6112)) + +# choose a predefined camera model, set the sensor mode and bayer layout. +# this camera model class sets the appropriate DNG's tags needed based on the camera sensor. ( needed for bit unpacking, color matrices ) +# camera = RaspberryPiHqCamera(3, CFAPattern.BGGR) +# [0, 0, 0, 0] is equal to a RRRR monochrome filter +# in order for this to work, we need to rewrite the blue and green pixels +camera = RaspberryPiHqCamera(3, [0, 0, 0, 0]) + +# if self.mode == 1: +# width = 2028 +# height = 1080 +# if self.mode == 2: +# width = 2028 +# height = 1520 +# if self.mode == 3: +# width = 4056 +# height = 3040 +# if self.mode == 4: +# width = 1012 +# height = 760 + +# example of adding custom DNG tags to predefined tags from camera model +camera.tags.set(Tag.ApertureValue, [[4,1]]) # F 4.0 +camera.tags.set(Tag.ExposureTime, [[1,300]]) # SHUTTER 1/400 +camera.tags.set(Tag.PhotographicSensitivity, [1000]) # ISO 400 +camera.fmt = dict({ + # tuple is in the form width height + 'size': (4056,3040), + 'bpp': 12, + 'format': 'SRGGB12_CSI2P' +}) + +globalBlueArray = 0 +globalGreenArray = 0 +globalRedArray = 0 +def getColorArrays(rawFrame): + height, width = rawFrame.shape + + blueArray = np.zeros((int(height / 2), int(width / 2)), dtype=np.uint16) + blueArray[:, :] = rawFrame[::2, ::2] + print(blueArray[1000,1000]) + + redArray = np.zeros((int(height / 2), int(width / 2)), dtype=np.uint16) + redArray[:, :] = rawFrame[1::2, 1::2] + print(redArray[1000,1000]) + + greenArray = np.zeros((int(height), int(width / 2)), dtype=np.uint16) + # get even rows (BG rows) + greenArray[::2, :] = rawFrame[::2, 1::2] + # get odd rows (GR rows) + greenArray[1::2, :] = rawFrame[1::2, ::2] + + global globalBlueArray + globalBlueArray = blueArray + + global globalRedArray + globalRedArray = redArray + + global globalGreenArray + globalGreenArray = greenArray + + return rawFrame + +# pass camera reference into the converter. +r = RPICAM2DNG(camera) +unpackedPixels = r.__unpack_pixels__(data) +getColorArrays(unpackedPixels) + +plt.figure(figsize=(12, 8)) +plt.boxplot(globalGreenArray) +plt.show() + +# B G B G B G +# G R G R G R +# B G B G B G +# G R G R G R +# B G B G B G +# G R G R G R diff --git a/examples/app.py b/examples/app.py new file mode 100644 index 0000000..63da247 --- /dev/null +++ b/examples/app.py @@ -0,0 +1,62 @@ +from flask import Flask,request +import subprocess + +app = Flask(__name__) + +wifi_device = "wlan0" + +@app.route('/') +def index(): + result = subprocess.check_output(["nmcli", "--colors", "no", "-m", "multiline", "--get-value", "SSID", "dev", "wifi", "list", "ifname", wifi_device]) + ssids_list = result.decode().split('\n') + dropdowndisplay = f""" + + + + Wifi Control + + +

+                <h1>Wifi Control</h1>
+                <!-- form markup: an SSID <select> populated from ssids_list, a password <input>, and a submit button that POSTs to /submit -->
+ + + """ + return dropdowndisplay + + +@app.route('/submit',methods=['POST']) +def submit(): + if request.method == 'POST': + print(*list(request.form.keys()), sep = ", ") + ssid = request.form['ssid'] + password = request.form['password'] + connection_command = ["nmcli", "--colors", "no", "device", "wifi", "connect", ssid, "ifname", wifi_device] + if len(password) > 0: + connection_command.append("password") + connection_command.append(password) + result = subprocess.run(connection_command, capture_output=True) + if result.stderr: + return "Error: failed to connect to wifi network: %s" % result.stderr.decode() + elif result.stdout: + return "Success: %s" % result.stdout.decode() + return "Error: failed to connect." + + +if __name__ == '__main__': + app.run(debug=True, host='0.0.0.0', port=80) \ No newline at end of file diff --git a/examples/main.py b/examples/main.py index 8902dc3..c33602e 100644 --- a/examples/main.py +++ b/examples/main.py @@ -4,6 +4,7 @@ import datetime import os import cmd +from flask import Flask from pidng.core import RPICAM2DNG, DNGTags, Tag from pidng.camdefs import * @@ -84,7 +85,7 @@ def setMode(self, modeToSet): print(f'mode but be the value 1, 2, or 3. Got: {modeToSet}') def printConfig(self): - print(f''' + configString = f''' *** Cam Config ***\n camera name: {self.cameraName}\n bayer filter: {self.bayerFilterString}\n @@ -95,7 +96,9 @@ def printConfig(self): size: {self.width}x{self.height}\n mode: {self.mode}\n bit: {self.bit} - ''') + ''' + print(configString) + return configString def record(self): parentDir = self.getParentDirectoryForRecording() @@ -236,9 +239,21 @@ def medianRedShiftFilter(self, rawFrame): rawFrame[row][col] = average return rawFrame +# app = Flask(__name__) +# globalCam = RpiCam('100000', '1.0', '10', '1000') +# @app.route('/api/config/') +# def get_config(): +# return globalCam.printConfig() + +# @app.route('/') +# def get_wow(): +# print('wow') +# return 'hello' + def main(): # shutterSpeed (microseconds), analogGain, framerate, recordingTime - cam = RpiCam('100000', '1.0', '10', '1000') + cam = RpiCam('1200', '0.5', '10', '1000') + # app.run(debug=True, host='0.0.0.0', port=8080) commandHandler = CommandHandler(cam) commandHandler.cmdloop() diff --git a/examples/raw2dng.py b/examples/raw2dng.py index 79dcbd5..6ce58bb 100644 --- a/examples/raw2dng.py +++ b/examples/raw2dng.py @@ -4,16 +4,16 @@ import struct # image specs -width = 4096 -height = 3072 +width = 2020 +height = 1072 bpp= 12 # load raw data into 16-bit numpy array. numPixels = width*height -rawFile = 'extras/scene_daylight_211ms_c2.raw16' +rawFile = '../rpi-raw-test/rpi-raw-test/test00000.raw' rf = open(rawFile, mode='rb') rawData = struct.unpack("H"*numPixels,rf.read(2*numPixels)) -rawFlatImage = np.zeros(numPixels, dtype=np.uint16) +rawFlatImage = np.zeros((width, int(height / 3 * 2)), dtype=np.uint16) rawFlatImage[:] = rawData[:] rawImage = np.reshape(rawFlatImage,(height,width)) rawImage = rawImage >> (16 - bpp) diff --git a/examples/rpicam_bg_red_inheritance.py b/examples/rpicam_bg_red_inheritance.py index 78b4340..6772553 100644 --- a/examples/rpicam_bg_red_inheritance.py +++ b/examples/rpicam_bg_red_inheritance.py @@ -1,9 +1,11 @@ from pidng.core import RPICAM2DNG, DNGTags, Tag from pidng.camdefs import * import numpy as np +import matplotlib.pyplot as plt # load raw image data from file into numpy array. RAW frame from HQ camera. 
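# --- editor's sketch, not part of the patch ---
# The reshape((3040, 6112)) used throughout these examples keeps each row as packed bytes:
# 4056 pixels * 12 bits / 8 = 6084 payload bytes plus padding up to the 6112-byte stride.
# Assuming the usual MIPI CSI-2 RAW12 layout (two pixels per three bytes, with both low nibbles
# in the third byte), a hypothetical stand-alone unpacker for those packed rows would be:
def unpack_raw12(packed_rows, width=4056):
    used = packed_rows[:, :width * 3 // 2].astype(np.uint16)   # drop the per-row padding bytes
    out = np.zeros((packed_rows.shape[0], width), dtype=np.uint16)
    out[:, 0::2] = (used[:, 0::3] << 4) | (used[:, 2::3] & 0x0F)
    out[:, 1::2] = (used[:, 1::3] << 4) | (used[:, 2::3] >> 4)
    return out
# --- end of sketch ---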
-img = '../raw-test-full-sensor/test00000.raw' +img = '../rpicam-raw-20-6-2024--15.12.3\BGGRss1200gain0.1---00001.raw' +# "C:\Users\matth\code\PiDNG\rpicam-raw-20-6-2024--15.12.3\BGGRss1200gain0.1---00001.dng" data = np.fromfile(img, dtype=np.uint8) # file size 4669440 # (1520 height) * (((2028 width * 12 bits per pixel) / 8 bits per byte) + 30 extra bytes*) = 4669440 bytes @@ -18,7 +20,7 @@ # camera = RaspberryPiHqCamera(3, CFAPattern.BGGR) # [0, 0, 0, 0] is equal to a RRRR monochrome filter # in order for this to work, we need to rewrite the blue and green pixels -camera = RaspberryPiHqCamera(3, [0, 0, 0, 0]) +camera = RaspberryPiHqCamera(3, [2, 1, 1, 0]) # if self.mode == 1: # width = 2028 @@ -66,9 +68,152 @@ def redShiftFilter(rawFrame): rawFrame[y][x] = rawFrame[targetRedY][targetRedX] return rawFrame +def medianRedShiftFilter(rawFrame): + height, width = rawFrame.shape + for col in range(1, width): # width + for row in range(1, height): # height + if(row % 2 == 0): + if(col % 2 == 0): # blue + # get nearest red + reds = [rawFrame[row - 1][col - 1], rawFrame[row - 1][col + 1], rawFrame[row + 1][col - 1], rawFrame[row + 1][col + 1]] + else: # green + reds = [rawFrame[row - 1][col], rawFrame[row + 1][col]] + else: + if(col % 2 == 0): # green + reds = [rawFrame[row][col - 1], rawFrame[row][col + 1]] + else: # red + reds = [rawFrame[row][col]] + average = int(sum(reds) / len(reds)) + rawFrame[row][col] = average + return rawFrame + +def smartMedianRedShiftFilter(rawFrame): + height, width = rawFrame.shape + for col in range(1, width): # width + for row in range(1, height): # height + if(row % 2 == 0): + if(col % 2 == 0): # blue + # get nearest red + reds = [rawFrame[row - 1][col - 1], rawFrame[row - 1][col + 1], rawFrame[row + 1][col - 1], rawFrame[row + 1][col + 1]] + else: # green + reds = [abs(rawFrame[row][col] - 200) * 4] + else: + if(col % 2 == 0): # green + reds = [abs(rawFrame[row][col] - 200) * 4] + else: # red + reds = [rawFrame[row][col]] + average = int(sum(reds) / len(reds)) + rawFrame[row][col] = average + return rawFrame + + +globalBlueArray = 0 +globalGreenArray = 0 +globalRedArray = 0 +def getColorArrays(rawFrame): + height, width = rawFrame.shape + + blueArray = np.zeros((int(height / 2), int(width / 2)), dtype=np.uint16) + blueArray[:, :] = rawFrame[::2, ::2] + print(blueArray[1000,1000]) + + redArray = np.zeros((int(height / 2), int(width / 2)), dtype=np.uint16) + redArray[:, :] = rawFrame[1::2, 1::2] + print(redArray[1000,1000]) + + greenArray = np.zeros((int(height), int(width / 2)), dtype=np.uint16) + # get even rows (BG rows) + greenArray[::2, :] = rawFrame[::2, 1::2] + # get odd rows (GR rows) + greenArray[1::2, :] = rawFrame[1::2, ::2] + + global globalBlueArray + globalBlueArray = blueArray + + global globalRedArray + globalRedArray = redArray + + global globalGreenArray + globalGreenArray = greenArray + + return rawFrame + +def normalizeBluePixels(rawFrame): + height, width = rawFrame.shape + for col in range(1, width): # width + for row in range(1, height): # height + if(row % 2 == 0): + if(col % 2 == 0): # blue + # get nearest red + nearValues = [rawFrame[row - 1][col - 1], rawFrame[row - 1][col + 1], rawFrame[row + 1][col - 1], rawFrame[row + 1][col + 1]] + average = int(sum(nearValues) / len(nearValues)) + rawFrame[row][col] = average + return rawFrame + +def normalizePixels(rawFrame): + # lowest possible value for the sensor https://www.strollswithmydog.com/pi-hq-cam-sensor-performance/ + IMX477_BLACK_LEVEL = 255 + GREEN_PIXEL_MULTIPLIER = 3.5 + 
BLUE_PIXEL_MULTIPLIER = 10 + height, width = rawFrame.shape + newFrame = np.zeros((height, width), dtype=np.uint16) + + red_data = rawFrame[1::2, 1::2] + green_data_even_rows = rawFrame[::2, 1::2] + green_data_odd_rows = rawFrame[1::2, ::2] + blue_data = rawFrame[::2, ::2] + + red_conditional = red_data < 400 + green_even_row_conditional = green_data_even_rows < 270 + green_odd_row_conditional = green_data_odd_rows < 270 + blue_conditional = blue_data < 300 + + normalized_green_on_even_rows = (abs(green_data_even_rows - IMX477_BLACK_LEVEL) * GREEN_PIXEL_MULTIPLIER) + IMX477_BLACK_LEVEL + normalized_green_on_odd_rows = (abs(green_data_odd_rows - IMX477_BLACK_LEVEL) * GREEN_PIXEL_MULTIPLIER) + IMX477_BLACK_LEVEL + normalized_blue_data = (abs(blue_data - IMX477_BLACK_LEVEL) * BLUE_PIXEL_MULTIPLIER) + IMX477_BLACK_LEVEL + + modified_red_data = np.where(red_conditional, IMX477_BLACK_LEVEL, red_data) + modified_green_even_row_data = np.where(green_even_row_conditional, IMX477_BLACK_LEVEL, normalized_green_on_even_rows) + modified_green_odd_row_data = np.where(green_odd_row_conditional, IMX477_BLACK_LEVEL, normalized_green_on_odd_rows) + modified_blue_data = np.where(blue_conditional, IMX477_BLACK_LEVEL, blue_data) + + # Setting these as integers and not floating point numbers is important. The resulting image will be broken. + # I believe it is due to floating point numbers as binary cannot be packed as expected + # blue + # newFrame[::2, ::2] = normalized_blue_data + newFrame[::2, ::2] = blue_data + + # green even + # newFrame[::2, 1::2] = np.floor(normalized_green_on_even_rows).astype(int) + newFrame[::2, 1::2] = green_data_even_rows + + # green odd + # newFrame[1::2, ::2] = np.floor(normalized_green_on_odd_rows).astype(int) + newFrame[1::2, ::2] = green_data_odd_rows + + # red + newFrame[1::2, 1::2] = red_data + + return newFrame + # pass camera reference into the converter. 
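# --- editor's sketch, not part of the patch ---
# A note on the arithmetic in normalizePixels above: the sliced Bayer planes are unsigned, so
# (plane - IMX477_BLACK_LEVEL) wraps around for values below the black level before abs() can
# help, and a 3.5x gain can push bright pixels past the 12-bit ceiling of 4095. scale_plane is a
# hypothetical, defensive version of that scaling (an assumption about intent, not the patch's code):
def scale_plane(plane, black_level=255, gain=3.5, white_level=4095):
    shifted = plane.astype(np.int32) - black_level            # signed, so no unsigned wrap-around
    scaled = shifted * gain + black_level
    return np.clip(scaled, 0, white_level).astype(np.uint16)
# --- end of sketch ---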
r = RPICAM2DNG(camera) -r.options(path="C:/Users/matth/code/PiDNG/raw-test-full-sensor", compress=False) -r.filter = redShiftFilter -r.convert(data, filename="./full-sensor-test-red-inheritance") -unpackedPixel = r.__unpack_pixels__(data=data) +r.options(path="C:/Users/matth/code/PiDNG/rpicam-raw-20-6-2024--13.0.24", compress=False) +r.filter = normalizePixels +r.convert(data, filename="./red-modx-1-normalized-comparable-green2") + +# blueDataToPlot = np.ascontiguousarray(globalBlueArray).view(np.uint16) +# plt.figure(figsize=(12, 8)) +# print(str(globalRedArray[1000,1000])) +# plt.boxplot(globalRedArray) +# plt.show() + +# B G B G B G +# G R G R G R +# B G B G B G +# G R G R G R +# B G B G B G +# G R G R G R + +# 283 / 314 = .9 +# 40 / From 5c53407b1b15be064901002809d5a234aaba52b1 Mon Sep 17 00:00:00 2001 From: matthewverde Date: Sat, 6 Jul 2024 20:11:00 -0700 Subject: [PATCH 8/9] mattg: commit notes --- examples/notes.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 examples/notes.md diff --git a/examples/notes.md b/examples/notes.md new file mode 100644 index 0000000..eb8d342 --- /dev/null +++ b/examples/notes.md @@ -0,0 +1,24 @@ +## Image analysis + +- Custom demosaicing is hard to compete with existing demosiacing algorithms (tried normalizing blue + green, setting values to "0" below certain values (both can be performed in custom demosaic algos)) +- Demosaicing algorithms allow a user to boost certain wavelengths while ignoring others (using the raw black points option) +- I found that setting red to 100 and green to 70 for IGV demosaic algo really made the dark regions pop +- Running more iterations on the capture sharpening option can really clear up an image (diminishing returns after 50 iterations) +- White point correction turned down to .1 can also lead to more clear images +- kidney bean is a region of space with low green levels coming through + + +### Thoughts: +- adding in multiple LEDs as backlights (red and blue) could result in over exposure and result in a lower quality image +- A single green LED will likely produce the best image as wavelengths will bleed into the red and blue spectrum +- Prioritizing our green pixels will produce a crisper image as we have the most pixels there +- The blue pixels are largely underutilized. That is not to say they don't produce some image quality. 
It was found that normalizing them results in large amounts of noise in the image +- Raw images are significantly bigger than existing jpeg, about 9x bigger +- We could make raw images smaller by cutting out surrounding black region donut, doing this could get the image down to ~6mb(double check this, I don't think i ended up squaring the radius), this would make the image about 2.5x bigger +- I think we should avoid coming up with custom demosaic algorithm, and instead opt for using existing ones +- raw-therapee has a cli +- There are python packages that exist that will demosaic for you +- demosaicing seems to be CPU intensive, and is likely best done as a post processing step once off of the raspberry-pi, and onto some better hardware +- Use aws, heorku, or house a service in the shop +- Possibility of coming up with cloud computing service that we charge for + From e84b135cb0791ab12554e4ab91590f6d7b01b6c5 Mon Sep 17 00:00:00 2001 From: matthewverde Date: Fri, 19 Jul 2024 13:17:18 -0700 Subject: [PATCH 9/9] mattg: add updates --- examples/analyzePixels.py | 1 + examples/rpicam_bg_red_inheritance.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/examples/analyzePixels.py b/examples/analyzePixels.py index 9d6ecc2..8d4852a 100644 --- a/examples/analyzePixels.py +++ b/examples/analyzePixels.py @@ -82,6 +82,7 @@ def getColorArrays(rawFrame): getColorArrays(unpackedPixels) plt.figure(figsize=(12, 8)) +# set blue green or red global array here plt.boxplot(globalGreenArray) plt.show() diff --git a/examples/rpicam_bg_red_inheritance.py b/examples/rpicam_bg_red_inheritance.py index 6772553..b615b6d 100644 --- a/examples/rpicam_bg_red_inheritance.py +++ b/examples/rpicam_bg_red_inheritance.py @@ -163,9 +163,9 @@ def normalizePixels(rawFrame): green_data_odd_rows = rawFrame[1::2, ::2] blue_data = rawFrame[::2, ::2] - red_conditional = red_data < 400 - green_even_row_conditional = green_data_even_rows < 270 - green_odd_row_conditional = green_data_odd_rows < 270 + red_conditional = red_data > 1100 + green_even_row_conditional = green_data_even_rows < 400 + green_odd_row_conditional = green_data_odd_rows < 400 blue_conditional = blue_data < 300 normalized_green_on_even_rows = (abs(green_data_even_rows - IMX477_BLACK_LEVEL) * GREEN_PIXEL_MULTIPLIER) + IMX477_BLACK_LEVEL @@ -192,7 +192,7 @@ def normalizePixels(rawFrame): newFrame[1::2, ::2] = green_data_odd_rows # red - newFrame[1::2, 1::2] = red_data + newFrame[1::2, 1::2] = modified_red_data return newFrame @@ -200,7 +200,7 @@ def normalizePixels(rawFrame): r = RPICAM2DNG(camera) r.options(path="C:/Users/matth/code/PiDNG/rpicam-raw-20-6-2024--13.0.24", compress=False) r.filter = normalizePixels -r.convert(data, filename="./red-modx-1-normalized-comparable-green2") +r.convert(data, filename="./red-modx-1-red-bean") # blueDataToPlot = np.ascontiguousarray(globalBlueArray).view(np.uint16) # plt.figure(figsize=(12, 8))