;--Configuration: working directory, UI constants, and YOLO run options
appDir: system/options/path
change-dir appDir
imageFile: ""	;--file name of the currently loaded image
iSize: 320x240	;--size of the two image canvases and the result area
margins: 5x5	;--layout origin and spacing
isFile?: false	;--set to true once an image has been successfully loaded
tasks: ["segment" "pose" "detect"] ;--("obb" "classify" not yet)
modes: ["predict" "track" ];--("benchmark" "train" "export" "val" not yet)
models: ["yolo11n-seg.pt" "yolo11n-pose.pt" "yolo11n.pt" "yolov9c-seg.pt" "yolov8n-seg.pt"]
source: ""	;--source=... argument passed to the yolo command line
task: tasks/1	;--defaults: first entry of each list
mode: modes/1
model: rejoin ["models/" models/1]	;--relative path to the model weights file
loadImage: does [
	;--Let the user pick an image file, show it in canvas1, and record its name
	;--Sets the globals: imageFile, source, isFile? (all read by runYOLO)
	isFile?: false
	picked: request-file
	if not none? picked [
		canvas1/image: load to-red-file picked
		canvas2/image: none							;--clear any previous YOLO result
		parts: split-path picked
		imageFile: parts/2							;--keep only the file name
		source: rejoin ["images/" imageFile]		;--relative path used by runYOLO
		sb/text: source
		isFile?: true
	]
]
runYOLO: does [
	;--Run the yolo command-line tool on the loaded image and display the result.
	;--Reads globals task, mode, model, source, imageFile set elsewhere.
	if isFile? [
		canvas2/image: none
		clear retStr/text
		results: %results.txt
		if exists? results [delete results]
		;--build the shell command, e.g. "yolo segment predict model=... source=..."
		prog: rejoin ["yolo " task " " mode " model=" model " " "source=" source]
		sb/text: prog
		do-events/no-wait								;--let the GUI refresh before the blocking call
		tt: dt [ret: call/wait/shell/output prog results]	;--dt times the shell call
		either ret = 0 [
			retStr/text: f: read results
			f: find f "runs"							;--get directory
			s: split f "[0m"							;--get complete directory
			destination: rejoin [s/1 "/" imageFile]
			canvas2/image: load to-red-file destination
			;--report only on success: destination is unset when the call fails
			sb/text: rejoin ["Result: " destination " in " round/to (tt/3) 0.01 " sec"]
		][
			;--bug fix: the original read 'destination here even when ret <> 0,
			;--which raised a "has no value" error; report the failure instead
			sb/text: rejoin ["YOLO failed with return code: " ret]
		]
	]
]
;--Main window: model/task/mode selectors, action buttons, two canvases, status bar
mainWin: layout [
title "Red and YOLO"
origin margins space margins
base 40x22 snow "Model"	;--static label
dp1: drop-down 120 data models select 1
on-change [model: rejoin ["models/" pick face/data face/selected]]	;--rebuild model path
base 40x22 snow "Tasks"	;--static label
dp2: drop-down 80 data tasks select 1
on-change [task: pick face/data face/selected]
base 40x22 snow "Mode"	;--static label
dp3: drop-down 80 data modes select 1
on-change [mode: pick face/data face/selected]
button "Load Image" [loadImage]
button "Run YOLO" [runYOLO]
pad 280x0 button 50 "Quit" [quit]
return
canvas1: base iSize	;--source image
canvas2: base iSize	;--YOLO result image
retStr: area iSize wrap	;--raw yolo console output
return
sb: field 645	;--status bar: command line, then result path and timing
]
view mainWin
Image Processing with Red and Rebol Languages
samedi 15 mars 2025
YOLO and Redbol
dimanche 9 mars 2025
Septimus: another real-world application with Red
My medical colleagues in the R2P2 unit are not all experienced developers. They've got other things to do, like saving lives. And they want immediate answers to their clinical questions. And they need easy-to-use tools.
That's why I like to use Red (or Rebol3) to develop tailor-made applications for my colleagues. No complexity (like Python), just Redbol simplicity!
That's what Septimus was designed for. We want to be able to follow the evolution of bacterial infections in our young patients according to the treatment applied. The big idea was to use an infra-red camera to detect points not visible to the naked eye. I've adapted some of redCV's functions to make it an independent module (see flir.red code). Actually, we use Septimus for following patients with acute tibial osteomyelitis.
Septimus is very simple. Once the IR image has been loaded, you can use a rectangle (of variable size or colour) to select the relevant body part in IR image. Then with a single button, you get the hottest point in that area.
mercredi 1 janvier 2025
Savitzky-Golay Filter
In 1964, A. Savitzky and M.J.E. Golay published an article in Analytical Chemistry describing a simple and effective smoothing technique: “Smoothing and Differentiation of Data by Simplified Least Squares Procedures”.
Their method makes it possible to smooth or derive a time series, with equidistant abscissa values, by a simple convolution with a series of coefficients corresponding to the degree of the chosen polynomial interpolation and to the desired operation: simple smoothing or derivation up to 5th order.
The convolution is performed by n multiplications, followed by the sum of the products and completed by dividing by the corresponding norm. The coefficients and norms are provided in the article. Savitzky and Golay's article is accompanied by 11 tables of coefficients suitable for smoothing or determining the first 5 derivatives; convolutions are performed for different degrees of polynomials and over ranges from 5 to 25 points. The tables published by Savitzky and Golay contain different typo errors. They were corrected by J. Steiner, Y. Termonia and J. Deltour in 1972.
I really like this filter, as it preserves signal dynamics and effectively filters out background noise. We've used this technique a lot in recent years at R2P2 (https://uniter2p2.fr) to process videos (of babies) who were shaking. This prevented our neural networks from correctly identifying the baby's body joints. With this type of filter, everything is back to normal. The video images did not shake and the detection algorithms became perfect (see Taleb, A., Rambaud, P., Diop, S., Fauches, R., Tomasik, J., Jouen, F., Bergounioux, J. "Spinal Muscular Amyotrophy detection using computer vision and artificial intelligence." in JAMA Pediatrics, Published online March 4, 2024.).
The main advantage of this process is that it's rather easy to program, allowing direct access to derivative values. On the other hand, abscissa values must be equidistant, and extreme points are ignored.
You can find the filter code for Red:
(https://github.com/ldci/redCV/blob/master/samples/signal_processing/sgFilter.red)
And for Rebol 3 here:
https://github.com/ldci/R3_tests/blob/main/signalProcessing/sgFilter.r3
A. Savitzky, M.J.E. Golay, ANAL. CHEM., 36,1627 (1964)
J. Steiner, Y. Termonia, J. Deltour, ANAL. CHEM., 44,1909 (1972)
lundi 30 décembre 2024
Global movements in infants
At the R2P2 unit at Garches hospital (https://uniter2p2.fr/), we are heavily involved in developing systems for analysing spontaneous movements in babies.
In a recent article (A. Taleb, P. Rambaud, S. Diop, R. Fauches, J. Tomasik, F. Jouen, J. Bergounioux. “Spinal Muscular Amyotrophy detection using computer vision and artificial intelligence.” in JAMA Pediatrics, 2024), we developed a video-based AI system. It's a semi-supervised system. But as is often the case, these neural networks don't clearly explain how they make their classification decisions. So we added a SHAP module (https://shap.readthedocs.io/en/latest/) that explained how the network made its decision to distinguish normal motor skills from those of babies with spinal muscular atrophy. The result is brilliant. This neural network behaves like a human expert: it tells us that it was the absence of lower-limb movements in SMA babies that was its decision-making factor. That's great, because it's clinically consistent!
dimanche 15 décembre 2024
Signal processing with Red and Rebol
In many of the data we collect in hospital, we are dealing with time series, some of which show an unexpected variation as a function of time. For example, in our work on the perception of babies' cries by adults, we observed that most of the signals showed a linear temperature drift over the course of the experiment. This is probably linked to the electronics of our camera. For these reasons, I've developed a few simple algorithms in Red and Rebol 3 that solve some of these problems. I mainly use datatype vector!, which is very efficient for numerical calculations with Red or Rebol 3.
One of the first ways is to remove the DC constant from the signal. Simply remove the mean value of the signal for each value of the signal. Rebol and Red have a function (average) that calculates the average of a vector.
detrendSignal: func [v [vector!]
	"Remove continuous component (DC offset) in signal"
][
	;--basic detrend: subtract the mean from every sample (x - mean)
	_v: copy v										;--work on a copy, leave v untouched
	;--bug fix: the original used "---average ..." which is NOT a comment in Red;
	;--those words would be evaluated and raise a "has no value" error at runtime
	_average: average _v							;--average is a native function in Red and Rebol 3
	repeat i _v/length [_v/:i: _v/:i - _average]
	_v												;--return the detrended copy
]
lundi 2 décembre 2024
The Virginia Project
The Virginia project (https://uniter2p2.fr/en/projects/) focuses on studying the thermoregulation of newborns from thermal images.
The primary goal of this project is to detect any deterioration in the infant’s health as early as possible using their thermal profile. Over 1,000 images of newborns were captured after birth at four different time points, corresponding to Apgar assessments at 1, 3, 5, and 10 minutes after birth.
The ultimate objective is to analyze the thermal evolution of these infants at these four key moments.
Infrared images were acquired with a FLIR T650sc camera. The T650sc camera is equipped with an uncooled Vanadium Oxide (VOx) microbolometer detector that produces thermal images of 640 x 480 pixels, with an accuracy of +/- 1 °C.
The Virginia software was developed entirely within the R2P2 laboratory (by ldci) using Red programming language (https://www.red-lang.org), and the redCV library for image processing (https://github.com/ldci/redCV). The Virginia software includes add-on modules for decoding images.
THE FLIR MODULE
This module has been tested with different FLIR cameras. Its main function is to decode the metadata contained in any radiometric file and to extract the visible image (RGB), the infrared image (IR), the color palette associated with the IR image as well as the temperatures (in degrees °C) associated to each pixel.
This module uses two external programs :
ExifTool (https://exiftool.org), written and maintained by Phil Harvey, is a fabulous program written in Perl that allows you to read and write the metadata of many computer files. ExifTool supports FLIR files. It works on macOs, Linux and Windows platforms.
ImageMagick (https://imagemagick.org/index.php) is a free software, including a library, as well as a set of command line utilities, allowing to create, convert, modify, and display images in a very large number of formats. The FLIR module mainly uses the magick utility for MacOs, Linux and Windows. (Convert is a macOS utility).
With these two tools, we can extract the RGB image and the 16-bit image (in pgm format) containing the temperatures.
Once the metadata are extracted, we call a Python library: PixelLib
THE PIXELLIB LIBRARY
This superb library written and maintained by Ayoola Olafenwa is used for the semantic segmentation which allows to identify the newborn in the image. We use the latest version of PixelLib (https://github.com/ayoolaolafenwa/PixelLib) which supports PyTorch and is more efficient for segmentation. The PyTorch version of PixelLib uses the PointRend object segmentation architecture by Alexander Kirillov et al. 2019 to replace the Mask R-CNN. PointRend is an excellent neural network for implementing object segmentation. It generates accurate segmentation masks and runs at a high speed that meets the growing demand for real-time computer vision applications.
First, we only look for the class person without looking for other objects in the RGB image. Then, we get the detected mask as a matrix of true or false values. It is then very simple to reconstruct the binary image of the mask by replacing the true values by the white color. With a simple AND logic operator between the FLIR image and the segmentation mask image, we obtain a new image that keeps only the thermal image of the baby. Only the pixel values higher than 0.0.0 (black) will be considered. Here, for example, the values of the baby's crotch will not be included for the various calculations.
After this first operation of body segmentation, we use a double algorithm. The next step is to detect the contours of the body. This operation will detect the contours in the mask as a polygon of vertices connected by a B-Spline curve. The contour detection algorithm uses several techniques. First, two morphological operators of dilation and erosion are successively applied to smooth the contours of the mask calculated by the semantic segmentation. Then we use the Freeman coding chain technique (FCC). This technique allows the coding with a limited number of bits (8) of the local direction of a contour element defined in the image. This allows the constitution of a chain of codes from an initial pixel, considering that a contour element links two related pixels.
When the result of the edge detection is adequate we can proceed to the calculation of the body temperatures. We use a ray-tracing algorithm that makes sure that each pixel of the image belongs to the polygon representing the baby's body. This operation allows us to extract from the 2-D temperature matrix only the body temperatures in the form of a vector which is then used for the different calculations.
The code is not open-source, as we are in the process of registering patents on certain technological innovations. As soon as this is possible, I will give free access to all sources. The idea was just to show that you can do great things with Red.
Freeman H. On the encoding of arbitrary geometric configurations. IRE Transactions on Electronics Computers. 1961. 10:260–268
Panoptic Segmentation. Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, Piotr Dollar; Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 9404-9413
samedi 30 novembre 2024
Using json files with Red and Rebol
Json is an open standard file format and data interchange format that uses human-readable text to store and transmit data objects made of name–value pairs and arrays. Json and Red or Rebol are very similar in the way they represent data. I sincerely believe that the development of Json benefited from all the work done by Carl Sassenrath when he developed Rebol 2.
Between 2020 and 2023, I developed a major program at the Raymond Poincaré hospital (https://uniter2p2.fr/) with Red, which used a thermal camera to measure the body temperature of newborn babies. This was a bit tricky, as we had to extract the baby's body coordinates from the thermal image in order to measure body temperature. To do this, I used semantic segmentation algorithms such as those proposed by Ayoola Olafenwa with her PixelLib library (https://pixellib.readthedocs.io/en/latest/).
In this program, I also included an export of the babies' images in .jpg format, as well as an export of the baby's body coordinates in .json format. The idea was to be able to use this data with annotation tools such as labelMe (https://github.com/wkentaro/labelme).
A few days ago, I had to return to this data to prepare a publication. Bad surprise: PixelLib and LabelMe no longer work with recent versions of macOS and Apple's new Silicon processors.
Fortunately, with Red (or Rebol3) I was able to solve the problem with a few lines of code.
Red [
Needs: 'View	;--GUI application
]
;--we use func: all words are global
isFile?: none	;--set to yes once a jpeg file has been loaded
loadImage: does [
	;--Let the user pick a .jpg file, display it, and derive the matching .json name
	;--Sets the globals: jpgFile, jsonFile, isFile? (read by the "Draw" button)
	canvas/image: none
	clear f/text
	clear info/text
	isFile?: no
	picked: request-file/filter ["jpeg Files" "*.jpg"]
	if not none? picked [
		jpgFile: picked
		jsonFile: copy jpgFile
		replace jsonFile ".jpg" ".json"				;--labelMe pairs <name>.jpg with <name>.json
		canvas/image: load jpgFile
		f/text: form jpgFile
		isFile?: yes
	]
]
getCoordinates: func [
f [file!]	;--labelMe .json annotation file
][
;--Extract the annotated body coordinates from a labelMe json file.
;--Returns a block of pair! values; also sets the global 'infos (shown in the UI).
f: read f ;--Red reads the json file as a string
replace f ",." ",0." ;--in case of missing 0 values (e.g. ",.5" -> ",0.5")
js: load-json f ;--json string -> redbol map object
keys: keys-of js ;--a block of keys
;--NOTE(review): positional access assumes keys-of preserves the labelMe key
;--order (version, flags, shapes, imagePath, imageData, imageHeight, imageWidth)
;--— TODO confirm for the load-json implementation in use
version: select js keys/1 ;--labelMe version
flags: select js keys/2 ;--none
shapes: select js keys/3 ;--coordinates are here as a block of length 1
imagePath: select js keys/4 ;--jpeg file
imageData: select js keys/5 ;--none
imageHeight: select js keys/6 ;--imageHeight
imageWidth: select js keys/7 ;--imageWidth
bPoints: copy [] ;--block for coordinates
;--Thanks to Oldes for s/points
foreach s shapes [
infos: rejoin ["Label: " s/label " ID: " s/group_id " Shape Type: " s/shape_type]
foreach p s/points [
;--keep only strictly positive coordinates, converted to pair!
if all [p/1 > 0.0 p/2 > 0.0] [append bPoints to pair! p]
]
]
bPoints ;--returned coordinates
]
showCoordinates: func [f [file!] b [block!]
][
;--Prepare the image and Draw program for rendering the extracted body polygon.
;--f: the jpeg file; b: block of pair! coordinates from getCoordinates.
;--Sets the globals 'img and 'code, consumed by the "Draw Extracted Body" button.
code: compose [
fill-pen 255.0.0.120 ;--draw command: translucent red fill
pen 0.0.0.100 ;--draw command: translucent black outline
line-width 1 ;--draw command
polygon ;--draw command: vertices are appended below
]
img: load f
bb: make image! reduce [3x3 black] ;--small black marker stamped at each vertex
foreach p b [
change at img p bb ;--draw coordinates in image
append code p ;--append polygon vertices
]
]
;--Main window: load a jpeg, then draw the extracted body polygon over it
view win: layout [
title "Neonate Labelling"
button "Load a Neonate File (.jpg)" [loadImage]
button "Draw Extracted Body" [
if isFile? [
showCoordinates jpgFile getCoordinates jsonFile	;--fills globals img, code, infos
info/text: infos
canvas/image: draw img code	;--render the polygon on the image
]
]
info: field 250	;--label/id/shape-type of the annotation
pad 15x0
button "Quit" [Quit]
return
canvas: base 640x480 white	;--image display area
return
f: field 640	;--full path of the loaded jpeg
do [f/enabled?: info/enabled?: no]	;--read-only info fields
]
And the result: Avatar baby !