Enabling Breakthroughs in Life Sciences as CTO @ DeepMirror
rg314 / pytraction Goto Github PK
View Code? Open in Web Editor NEWBayesian Traction Force Microscopy
License: BSD 3-Clause "New" or "Revised" License
Bayesian Traction Force Microscopy
License: BSD 3-Clause "New" or "Revised" License
Look into using github actions over travis.ci
For images with many cells in the field of view, it might be tricky to use a region with no cells for the automatic estimation of the prior, even if the ROI of the cell of interest is present.
If multiple frames are available as reference (ideally with no cells), one could use them for this purpose even if no ROI was provided.
Is it possible to wrap the functions and the Bayesian optimisation with Numba's @jit decorator?
Many users or first-time python users can find it challenging to figure out how to use software packages to speed up their workflow.
As we all know, nobody reads the read me.
Aki pointed out that the ZeroCostDL4Mic repo jumps straight to a video tutorial and a Google Colab notebook so users can get started quickly. I think this would be fantastic to do for this package.
The authors provide a few videos that outline the usage. This could be a real winner to get people using the software.
The PIV part is currently implemented using OpenPIV in pytraction/piv.py
but we also need to add the NMT functionality to filter the X, Y, U, V components. We also need to figure out how to set the threshold.
package PIV;
import ij.IJ;
import ij.ImagePlus;
import ij.ImageStack;
import ij.WindowManager;
import ij.gui.GenericDialog;
import ij.gui.WaitForUserDialog;
import ij.io.FileSaver;
import ij.io.OpenDialog;
import ij.io.SaveDialog;
import ij.measure.ResultsTable;
import ij.plugin.filter.MaximumFinder;
import ij.plugin.filter.PlugInFilter;
import ij.process.ColorProcessor;
import ij.process.FHT;
import ij.process.FloatProcessor;
import ij.process.ImageProcessor;
import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Arrays;
import java.util.Locale;
import org.opensourcephysics.display2d.GridPointData;
// Iterative multi-pass PIV (particle image velocimetry) ImageJ plugin.
// Operates on a 2-slice stack (slice 1 = reference, slice 2 = target) and
// refines displacement vectors over up to three passes.
// NOTE(review): this appears to be decompiled code; field naming follows the
// decompiler output.
public class iterative_PIV implements PlugInFilter {
String arg; // plugin argument from setup(): selects dialog mode ("Basic", "Cross-correlation", "Debug", else advanced)
ImagePlus imp; // the 2-slice input stack
int width; // image width in pixels
int height; // image height in pixels
String title; // title of the input image, used to name result windows/files
String file; // output file name chosen in the post-processing save dialog
String piv0Path; // optional path to a file loaded as 0th-pass PIV data
// Per-pass parameters (static: remembered across plugin invocations).
// winS* = interrogation window size, vecS* = vector spacing, sW* = search window size.
private static int winS1;
private static int vecS1;
private static int sW1;
private static int winS2;
private static int vecS2;
private static int sW2;
private static int winS3;
private static int vecS3;
private static int sW3;
int nPass = 3; // number of PIV passes to run (reduced to 2 or 1 when a pass is zeroed out)
private static double cThr; // correlation threshold used for counting low-confidence vectors
boolean db = false; // debug mode: verbose logging and probe-window display
boolean batch; // batch mode: save plots/data to 'dir' without interaction
boolean pp = false; // when true, skip the automatic replace-by-median after each pass
boolean dCanceled = false; // set when the interactive post-process dialog was cancelled
boolean xc = true; // true = conventional FHT cross-correlation (window size == search size)
boolean chkPeakA = false; // use the "advanced" empirical peak check
boolean noChkPeak = false; // disable all peak checking (threshold check only)
String ppMethod; // batch post-processing method: "None", "NMT" or "DMT"
double pp1; // batch post-processing parameter 1 (NMT noise or DMT C1)
double pp2; // batch post-processing parameter 2 (NMT threshold or DMT C2)
int dbX = -1; // debug probe X position (-1 = none)
int dbY = -1; // debug probe Y position (-1 = none)
private static String dir = ""; // output directory (remembered across invocations)
int nx; // number of vectors per row for the current pass
int ny; // number of vector rows for the current pass
double[][] PIVdata1; // post-processed PIV data
double[][] PIVdata; // PIV data of the current pass
double[][] PIVdata0; // PIV data of the previous pass (prior for the next pass)
double[][][] PIV0; // PIVdata0 reorganised as a 3-D grid for interpolation
int action = 3; // last selected post-processing action (default: accept & output)
// Post-processing defaults: NMT = normalized median test, DMT = dynamic mean test.
double noiseNMT1 = 0.2D, thrNMT1 = 5.0D, c1DMT = 3.0D, c2DMT = 1.0D;
double sdR; // NOTE(review): not referenced in this chunk — presumably used by helpers below
double meanR; // NOTE(review): not referenced in this chunk — presumably used by helpers below
double max0 = 0.0D; // magnitude ceiling of the first plot, reused to scale later plots consistently
/**
 * Standard PlugInFilter entry: remembers the invocation argument (which
 * selects the dialog mode in run()) and the current image.
 */
public int setup(String argument, ImagePlus image) {
    this.arg = argument;
    this.imp = image;
    // 2053 = 1 + 4 + 2048, i.e. presumably DOES_8G | DOES_16 | NO_UNDO —
    // confirm against ij.plugin.filter.PlugInFilter flag constants.
    return 2053;
}
/**
 * Main entry point. Shows the parameter dialog matching {@code this.arg},
 * then runs up to {@code nPass} PIV passes, using each pass's result as the
 * prior for the next, plotting (and in batch mode saving) each result, and
 * finally applying interactive or batch post-processing.
 */
public void run(ImageProcessor paramImageProcessor) {
    int nSlices = this.imp.getImageStackSize();
    this.title = this.imp.getTitle();
    this.width = this.imp.getWidth();
    this.height = this.imp.getHeight();
    String suffix = "_PIV1"; // window/file suffix of the current pass
    // NOTE(review): execution continues after this error; with more than two
    // slices only slices 1 and 2 are used by doPIV.
    if (nSlices != 2)
        IJ.error("2 slices stack is required");
    if (this.arg.equals("Cross-correlation")) {
        if (!getParamsC()) {
            this.imp.changes = false;
            return;
        }
    } else if (this.arg.equals("Basic")) {
        if (!getParamsB()) {
            this.imp.changes = false;
            return;
        }
    } else if (this.arg.equals("Debug")) {
        if (!getParamsD()) {
            this.imp.changes = false;
            return;
        }
    } else if (!getParamsA()) {
        this.imp.changes = false;
        return;
    }
    IJ.log("PIV parameters: "); // FIX: was misspelled "paramters"
    IJ.log("pass1: Interrogation window=" + winS1 + " search window=" + sW1 + " vector spacing=" + vecS1);
    IJ.log("pass2: Interrogation window=" + winS2 + " search window=" + sW2 + " vector spacing=" + vecS2);
    IJ.log("pass3: Interrogation window=" + winS3 + " search window=" + sW3 + " vector spacing=" + vecS3);
    if (this.noChkPeak) {
        IJ.log("Peak check disabled");
    } else if (this.chkPeakA) {
        IJ.log("Using empirical parameters for peak check"); // FIX: was misspelled "emperical"
    }
    // was 'byte' loop variable; int is the conventional (and safe) choice
    for (int pass = 1; pass <= this.nPass; pass++) {
        int win = winS1;
        int vec = vecS1;
        int sw = sW1;
        if (pass == 1) {
            // Optionally seed the first pass with PIV data loaded from file.
            if (this.piv0Path != null) {
                try {
                    this.PIVdata0 = plot_.load2DArrayFromFile(this.piv0Path);
                } catch (Exception exception) {
                    IJ.error(exception.getMessage());
                }
                plotPIV(this.PIVdata0, this.title + "_PIV0", false);
            }
        } else if (pass == 2) {
            this.PIVdata0 = this.PIVdata; // previous pass becomes the prior
            win = winS2;
            vec = vecS2;
            sw = sW2;
            suffix = "_PIV2";
        } else if (pass == 3) {
            this.PIVdata0 = this.PIVdata;
            win = winS3;
            vec = vecS3;
            sw = sW3;
            suffix = "_PIV3";
        }
        if (this.PIVdata0 == null) {
            this.PIV0 = new double[1][1][1]; // sentinel: doPIV treats length==1 as "no prior"
        } else {
            int[] dims = plot_.getDimensions(this.PIVdata0);
            this.PIV0 = plot_.convert2DPivTo3D(this.PIVdata0, dims[0], dims[1]);
        }
        this.PIVdata = doPIV(this.imp, win, vec, sw, this.PIV0);
        if (!this.pp) {
            this.PIVdata = replaceByMedian(this.PIVdata);
            this.PIVdata = replaceByMedian2(this.PIVdata);
        }
        plotPIV(this.PIVdata, this.title + suffix, false);
        if (this.db) {
            IJ.log("" + this.title + suffix + ":");
            logPIV(this.PIVdata);
        }
        if (this.batch) {
            StringBuffer stringBuffer = generatePIVToPrint(this.PIVdata);
            write2File(dir, this.title + suffix + "_disp.txt", stringBuffer.toString());
        }
    }
    if (!this.batch) {
        // Interactive post-processing; replot the final result with a scale graph.
        this.PIVdata1 = pivPostProcess(this.PIVdata);
        ImagePlus lastPlot = WindowManager.getImage(this.title + suffix);
        if (lastPlot != null)
            lastPlot.close();
        plotPIV(this.PIVdata1, this.title + suffix, true);
        if (this.dCanceled) {
            IJ.log("" + this.title + suffix + ":");
            logPIV(this.PIVdata1);
        }
    } else if (!"None".equals(this.ppMethod)) { // FIX: was 'this.ppMethod != "None"' (String identity comparison)
        this.PIVdata1 = pivPostProcess_batch(this.PIVdata);
        StringBuffer stringBuffer = generatePIVToPrint(this.PIVdata1);
        write2File(dir, this.title + suffix + "_" + this.ppMethod + "_disp.txt", stringBuffer.toString());
    }
    this.imp.changes = false;
    IJ.freeMemory();
}
/**
 * Interactive post-processing loop: repeatedly shows the post-process dialog
 * and applies the chosen filter (NMT, DMT, restore, or accept) to a working
 * copy of the PIV data, replotting into a "_temp" window each round.
 *
 * @param piv the raw PIV data of the final pass (never modified)
 * @return the accepted, filtered copy; if the dialog is cancelled,
 *         {@code dCanceled} is set and the unmodified input is returned
 */
private double[][] pivPostProcess(double[][] piv) {
    // Deep copy so "Restore unprocessed PIV" can recover the original.
    double[][] result = new double[piv.length][piv[0].length];
    // FIX: loop counters were 'byte' and overflowed (looped forever / stopped
    // early) for PIV fields with more than 127 rows.
    for (int r = 0; r < result.length; r++)
        System.arraycopy(piv[r], 0, result[r], 0, piv[r].length);
    if (this.db) {
        WaitForUserDialog waitForUserDialog = new WaitForUserDialog("pause");
        waitForUserDialog.show();
    }
    int done = 0; // set to 1 when the user accepts the result (action 3)
    do {
        if (!getParamsP()) {
            this.dCanceled = true;
            return piv;
        }
        ImagePlus tempPlot = WindowManager.getImage(this.title + "_temp");
        switch (this.action) {
            case 0: // normalized median test + replace invalid by median
                result = normalizedMedianTest(result, this.noiseNMT1, this.thrNMT1);
                result = replaceByMedian(result);
                break;
            case 1: // dynamic mean test + replace invalid by median
                result = dynamicMeanTest(result, this.c1DMT, this.c2DMT);
                result = replaceByMedian(result);
                break;
            case 2: // restore the unprocessed data
                for (int r = 0; r < result.length; r++)
                    System.arraycopy(piv[r], 0, result[r], 0, piv[r].length);
                break;
            case 3: // accept and leave the loop
                done = 1;
                break;
        }
        if (tempPlot != null)
            tempPlot.close();
        plotPIV(result, this.title + "_temp", false);
        if (this.db)
            logPIV(result);
    } while (done == 0);
    ImagePlus tempPlot = WindowManager.getImage(this.title + "_temp");
    if (tempPlot != null)
        tempPlot.close();
    if (this.db) {
        IJ.log("PIV post process:");
        logPIV(result);
    }
    if (this.action == 3) {
        // Write the accepted data to the file chosen in getParamsP().
        StringBuffer stringBuffer = generatePIVToPrint(result);
        write2File(dir, this.file, stringBuffer.toString());
    }
    this.dCanceled = false;
    return result;
}
/**
 * Non-interactive post-processing used in batch mode: applies the configured
 * method ("NMT" = normalized median test, otherwise the dynamic mean test)
 * followed by replace-by-median, on a copy of the input.
 *
 * @param piv the raw PIV data (never modified)
 * @return the filtered copy
 */
private double[][] pivPostProcess_batch(double[][] piv) {
    double[][] result = new double[piv.length][piv[0].length];
    // FIX: loop counter was 'byte' and overflowed for more than 127 rows.
    for (int r = 0; r < result.length; r++)
        System.arraycopy(piv[r], 0, result[r], 0, piv[r].length);
    // FIX: was 'this.ppMethod == "NMT"' (String identity comparison); equals()
    // is also null-safe here.
    if ("NMT".equals(this.ppMethod)) {
        result = normalizedMedianTest(result, this.noiseNMT1, this.thrNMT1);
    } else {
        result = dynamicMeanTest(result, this.c1DMT, this.c2DMT);
    }
    result = replaceByMedian(result);
    return result;
}
/**
 * Draws the PIV vector field in a new color image window named
 * {@code plotTitle}; optionally adds the magnitude scale graph, and in batch
 * mode also saves the plot as a TIFF into {@code dir}.
 */
private void plotPIV(double[][] piv, String plotTitle, boolean withScaleGraph) {
    ColorProcessor canvas = new ColorProcessor(this.width, this.height);
    int[] dims = plot_.getDimensions(piv);
    double[][] magnitudes = plot_.get2DElement(piv, dims[0], dims[1], 4);
    double maxMag = plot_.findMax2DArray(magnitudes);
    plot_.colorMax = maxMag;
    double scale;
    if (this.max0 == 0.0D) {
        // First plot of this run: remember its ceiling so every later pass
        // is drawn with the same color/length scaling.
        scale = 24.0D / maxMag;
        this.max0 = maxMag;
    } else {
        scale = 24.0D / this.max0;
        plot_.colorMax = this.max0;
    }
    plot_.loadLut("S_Pet");
    plot_.drawVectors((ImageProcessor)canvas, dims, piv, magnitudes, scale, plot_.colors);
    ImagePlus plotImage = new ImagePlus(plotTitle, (ImageProcessor)canvas);
    plotImage.show();
    if (withScaleGraph)
        plot_.makeScaleGraph(scale);
    if (this.batch) {
        FileSaver saver = new FileSaver(plotImage);
        saver.saveAsTiff(dir + plotTitle + "_vPlot.tif");
    }
}
/**
 * Shows the advanced-mode parameter dialog and stores the results in the
 * static/per-instance parameter fields.
 *
 * @return false when the dialog was cancelled, the parameters are
 *         inconsistent, or the 0th-pass file selection was aborted
 */
private boolean getParamsA() {
    GenericDialog gd = new GenericDialog("Iterative PIV (Advanced Mode)");
    gd.addCheckbox("Load file as 0th pass PIV data?", false);
    gd.addMessage("(All sizes are in pixels)");
    gd.addMessage("1st pass PIV parameters:");
    if (winS1 == 0)
        winS1 = 128;
    gd.addNumericField("PIV1 interrogation window size", winS1, 0);
    if (sW1 == 0)
        sW1 = 256;
    gd.addMessage("(If search window size=window size, conventional xcorr will be used)");
    gd.addNumericField("SW1 :search window size", sW1, 0);
    if (vecS1 == 0)
        vecS1 = 64;
    gd.addNumericField("VS1 :Vector spacing", vecS1, 0);
    gd.addMessage("-----------------------");
    gd.addMessage("2nd pass PIV parameters: (set window size to zero to do only 1pass PIV)");
    if (winS2 == 0)
        winS2 = 64;
    gd.addNumericField("PIV2 interrogation window size", winS2, 0);
    if (sW2 == 0)
        sW2 = 128;
    gd.addNumericField("SW2 :search window size", sW2, 0);
    if (vecS2 == 0)
        vecS2 = 32;
    gd.addNumericField("VS2 :Vector spacing", vecS2, 0);
    gd.addMessage("-----------------------");
    gd.addMessage("3rd pass PIV parameters: (set window size to zero to do only 2pass PIV)");
    if (winS3 == 0)
        winS3 = 48;
    gd.addNumericField("PIV3 interrogation window size", winS3, 0);
    if (sW3 == 0)
        sW3 = 128;
    gd.addNumericField("SW3 :search window size", sW3, 0);
    if (vecS3 == 0)
        vecS3 = 16;
    gd.addNumericField("VS3 :Vector spacing", vecS3, 0);
    gd.addMessage("-----------------------");
    if (cThr == 0.0D)
        cThr = 0.6D;
    gd.addNumericField("correlation threshold", cThr, 2);
    gd.addCheckbox("Use advanced peak check? (empirical parameters)", false);
    gd.addCheckbox("Disable all peak checking?", false);
    gd.addCheckbox("Don't replace invalid vector by median?", false);
    gd.addCheckbox("batch mode?", false);
    gd.addChoice("Postprocessing", new String[] { "None", "NMT", "DMT" }, "None");
    gd.addNumericField("Postprocessing parameter1", 0.2D, 2);
    // FIX: this label was a duplicate "Postprocessing parameter1"; the field
    // is read into pp2 (NMT threshold / DMT C2).
    gd.addNumericField("Postprocessing parameter2", 5.0D, 2);
    if (dir.equals(""))
        dir = "/";
    gd.addStringField("Path to save outputs", dir, 30);
    gd.showDialog();
    // Read back in the exact order the fields were added.
    boolean loadPiv0 = gd.getNextBoolean();
    winS1 = (int)gd.getNextNumber();
    sW1 = (int)gd.getNextNumber();
    vecS1 = (int)gd.getNextNumber();
    winS2 = (int)gd.getNextNumber();
    sW2 = (int)gd.getNextNumber();
    vecS2 = (int)gd.getNextNumber();
    winS3 = (int)gd.getNextNumber();
    sW3 = (int)gd.getNextNumber();
    vecS3 = (int)gd.getNextNumber();
    cThr = gd.getNextNumber();
    this.chkPeakA = gd.getNextBoolean();
    this.noChkPeak = gd.getNextBoolean();
    this.pp = gd.getNextBoolean();
    this.batch = gd.getNextBoolean();
    this.ppMethod = gd.getNextChoice();
    this.pp1 = gd.getNextNumber();
    this.pp2 = gd.getNextNumber();
    // FIX: was 'this.ppMethod == "NMT"' (String identity comparison).
    if ("NMT".equals(this.ppMethod)) {
        this.noiseNMT1 = this.pp1;
        this.thrNMT1 = this.pp2;
    } else {
        this.c1DMT = this.pp1;
        this.c2DMT = this.pp2;
    }
    dir = gd.getNextString();
    // A zeroed-out pass shortens the pipeline.
    if (vecS3 == 0 || sW3 == 0 || winS3 == 0)
        this.nPass = 2;
    if (vecS2 == 0 || sW2 == 0 || winS2 == 0)
        this.nPass = 1;
    if (gd.wasCanceled())
        return false;
    if (!checkParams()) {
        IJ.error("Incompatible PIV parameters");
        return false;
    }
    if (loadPiv0) {
        OpenDialog openDialog = new OpenDialog("Select the PIV data", "");
        if (openDialog.getDirectory() == null || openDialog.getFileName() == null)
            return false;
        this.piv0Path = openDialog.getDirectory() + openDialog.getFileName();
    }
    return true;
}
/**
 * Shows the basic-mode dialog: only window and search sizes are asked for;
 * the vector spacing of each pass is derived as half its window size.
 *
 * @return false when the dialog was cancelled or the parameters are invalid
 */
private boolean getParamsB() {
    GenericDialog gd = new GenericDialog("Iterative PIV (Basic)");
    gd.addMessage("(All sizes are in pixels)");
    gd.addMessage("1st pass PIV parameters:");
    if (winS1 == 0)
        winS1 = 128;
    gd.addNumericField("PIV1 interrogation window size", winS1, 0);
    if (sW1 == 0)
        sW1 = 256;
    gd.addMessage("(If search window size=window size, conventional xcorr will be used)");
    gd.addNumericField("SW1 :search window size", sW1, 0);
    gd.addMessage("-----------------------");
    gd.addMessage("2nd pass PIV parameters: (set window size to zero to do only 1pass PIV)");
    if (winS2 == 0)
        winS2 = 64;
    gd.addNumericField("PIV2 interrogation window size", winS2, 0);
    if (sW2 == 0)
        sW2 = 128;
    gd.addNumericField("SW2 :search window size", sW2, 0);
    gd.addMessage("-----------------------");
    gd.addMessage("3rd pass PIV parameters: (set window size to zero to do only 2pass PIV)");
    if (winS3 == 0)
        winS3 = 32;
    gd.addNumericField("PIV3 interrogation window size", winS3, 0);
    if (sW3 == 0)
        sW3 = 96;
    gd.addNumericField("SW3 :search window size", sW3, 0);
    gd.addMessage("-----------------------");
    if (cThr == 0.0D)
        cThr = 0.6D;
    gd.addNumericField("correlation threshold", cThr, 2);
    gd.showDialog();
    winS1 = (int)gd.getNextNumber();
    sW1 = (int)gd.getNextNumber();
    vecS1 = winS1 / 2;
    winS2 = (int)gd.getNextNumber();
    sW2 = (int)gd.getNextNumber();
    vecS2 = winS2 / 2;
    winS3 = (int)gd.getNextNumber();
    sW3 = (int)gd.getNextNumber();
    vecS3 = winS3 / 2;
    cThr = gd.getNextNumber();
    if (vecS3 == 0 || sW3 == 0 || winS3 == 0)
        this.nPass = 2;
    if (vecS2 == 0 || sW2 == 0 || winS2 == 0)
        this.nPass = 1;
    // Parameters are validated before the cancel test — matching the
    // original control flow of this dialog.
    if (!checkParams()) {
        IJ.error("Incompatible PIV parameters");
        return false;
    }
    return !gd.wasCanceled();
}
/**
 * Shows the cross-correlation-mode dialog: each pass uses a search window
 * equal to its interrogation window (which makes checkParams() select the
 * conventional FHT cross-correlation path) and half-window vector spacing.
 *
 * @return false when the dialog was cancelled or the parameters are invalid
 */
private boolean getParamsC() {
    GenericDialog gd = new GenericDialog("Iterative PIV (Cross-Correlation)");
    gd.addMessage("(All sizes are in pixels)");
    if (winS1 == 0)
        winS1 = 128;
    gd.addNumericField("PIV1 interrogation window size", winS1, 0);
    if (winS2 == 0)
        winS2 = 64;
    gd.addMessage("(set PIV2 window size to zero to do only 1 pass PIV)");
    gd.addNumericField("PIV2 interrogation window size", winS2, 0);
    if (winS3 == 0)
        winS3 = 32;
    gd.addMessage("(set PIV3 window size to zero to do only 2 pass PIV)");
    gd.addNumericField("PIV3 interrogation window size", winS3, 0);
    gd.showDialog();
    winS1 = (int)gd.getNextNumber();
    sW1 = winS1;
    vecS1 = winS1 / 2;
    winS2 = (int)gd.getNextNumber();
    sW2 = winS2;
    vecS2 = winS2 / 2;
    winS3 = (int)gd.getNextNumber();
    sW3 = winS3;
    vecS3 = winS3 / 2;
    if (vecS3 == 0 || sW3 == 0 || winS3 == 0)
        this.nPass = 2;
    if (vecS2 == 0 || sW2 == 0 || winS2 == 0)
        this.nPass = 1;
    if (!checkParams()) {
        IJ.error("Incompatible PIV parameters");
        return false;
    }
    return !gd.wasCanceled();
}
/**
 * Shows the debug-mode dialog: the same fields as the advanced dialog plus a
 * probe position (debug_X/debug_Y) whose correlation windows are displayed.
 * Forces db=true and batch=false.
 *
 * @return false when the dialog was cancelled or the parameters are invalid
 */
private boolean getParamsD() {
    GenericDialog gd = new GenericDialog("Iterative PIV (Debug mode)");
    gd.addMessage("(All sizes are in pixels)");
    gd.addMessage("1st pass PIV parameters:");
    if (winS1 == 0)
        winS1 = 128;
    gd.addNumericField("PIV1 interrogation window size", winS1, 0);
    if (sW1 == 0)
        sW1 = 256;
    gd.addMessage("(If search window size=window size, conventional xcorr will be used)");
    gd.addNumericField("SW1 :search window size", sW1, 0);
    if (vecS1 == 0)
        vecS1 = 64;
    gd.addNumericField("VS1 :Vector spacing", vecS1, 0);
    gd.addMessage("-----------------------");
    gd.addMessage("2nd pass PIV parameters: (set window size to zero to do only 1pass PIV)");
    if (winS2 == 0)
        winS2 = 64;
    gd.addNumericField("PIV2 interrogation window size", winS2, 0);
    if (sW2 == 0)
        sW2 = 128;
    gd.addNumericField("SW2 :search window size", sW2, 0);
    if (vecS2 == 0)
        vecS2 = 32;
    gd.addNumericField("VS2 :Vector spacing", vecS2, 0);
    gd.addMessage("-----------------------");
    gd.addMessage("3rd pass PIV parameters: (set window size to zero to do only 2pass PIV)");
    if (winS3 == 0)
        winS3 = 48;
    gd.addNumericField("PIV3 interrogation window size", winS3, 0);
    if (sW3 == 0)
        sW3 = 128;
    gd.addNumericField("SW3 :search window size", sW3, 0);
    if (vecS3 == 0)
        vecS3 = 16;
    gd.addNumericField("VS3 :Vector spacing", vecS3, 0);
    gd.addMessage("-----------------------");
    if (cThr == 0.0D)
        cThr = 0.6D;
    gd.addNumericField("correlation threshold", cThr, 2);
    gd.addCheckbox("Use advanced peak check? (empirical parameters)", false);
    gd.addCheckbox("Disable all peak checking?", true);
    gd.addCheckbox("Don't replace invalid vector by median?", true);
    gd.addMessage("-----------------------");
    gd.addNumericField("debug_X", -1.0D, 0);
    gd.addNumericField("debug_Y", -1.0D, 0);
    gd.showDialog();
    winS1 = (int)gd.getNextNumber();
    sW1 = (int)gd.getNextNumber();
    vecS1 = (int)gd.getNextNumber();
    winS2 = (int)gd.getNextNumber();
    sW2 = (int)gd.getNextNumber();
    vecS2 = (int)gd.getNextNumber();
    winS3 = (int)gd.getNextNumber();
    sW3 = (int)gd.getNextNumber();
    vecS3 = (int)gd.getNextNumber();
    cThr = gd.getNextNumber();
    this.chkPeakA = gd.getNextBoolean();
    this.noChkPeak = gd.getNextBoolean();
    this.pp = gd.getNextBoolean();
    this.db = true;
    this.dbX = (int)gd.getNextNumber();
    this.dbY = (int)gd.getNextNumber();
    this.batch = false;
    if (vecS3 == 0 || sW3 == 0 || winS3 == 0)
        this.nPass = 2;
    if (vecS2 == 0 || sW2 == 0 || winS2 == 0)
        this.nPass = 1;
    if (gd.wasCanceled())
        return false;
    if (!checkParams()) {
        IJ.error("Incompatible PIV parameters");
        return false;
    }
    return true;
}
/**
 * Shows the post-processing chooser used by pivPostProcess(): one of NMT,
 * DMT, restore, or accept-and-output. When "accept" is chosen, also asks
 * where to save the data (stored in {@code dir}/{@code file}).
 *
 * @return false when either dialog was cancelled
 */
private boolean getParamsP() {
    String[] actions = { "Normalized median test and replace invalid by median", "Dynamic mean test and replace invalid by median", "Restore unprocessed PIV", "Accept this PIV and output" };
    GenericDialog gd = new GenericDialog("PIV post-processing");
    gd.addChoice("What to do?", actions, actions[this.action]);
    gd.addMessage("-----------------------");
    gd.addMessage("Normalized median test parameters:");
    gd.addNumericField("noise for NMT", this.noiseNMT1, 2);
    gd.addNumericField("Threshold for NMT", this.thrNMT1, 2);
    gd.addMessage("-----------------------");
    gd.addMessage("Dynamic mean test parameters:");
    gd.addNumericField("C1 for DMT", this.c1DMT, 2);
    gd.addNumericField("C2 for DMT", this.c2DMT, 2);
    gd.addMessage("Dynamic threshold = C1+C2*(StdDev within the surrounding 3x3 vectors)");
    gd.showDialog();
    this.action = gd.getNextChoiceIndex();
    this.noiseNMT1 = gd.getNextNumber();
    this.thrNMT1 = gd.getNextNumber();
    this.c1DMT = gd.getNextNumber();
    this.c2DMT = gd.getNextNumber();
    if (gd.wasCanceled())
        return false;
    if (this.action == 3) {
        SaveDialog sd = new SaveDialog("Save PIVdata", IJ.getDirectory("home"), "PIV_" + this.imp.getTitle(), ".txt");
        if (sd.getDirectory() == null || sd.getFileName() == null)
            return false;
        dir = sd.getDirectory();
        this.file = sd.getFileName();
    }
    return true;
}
/**
 * Validates the per-pass parameters and decides the correlation mode:
 * when search window == interrogation window on pass 1, the conventional
 * FHT cross-correlation path is used (xc = true).
 *
 * @return true when all configured passes have consistent parameters
 */
private boolean checkParams() {
    this.xc = (winS1 == sW1);
    if (!passParamsOk(winS1, sW1, vecS1, 0, null))
        return false;
    if (this.nPass != 1
            && !passParamsOk(winS2, sW2, vecS2, winS1,
                    "Interrogation window of second pass should be smaller than that of first pass"))
        return false;
    if (this.nPass == 3
            && !passParamsOk(winS3, sW3, vecS3, winS2,
                    "Interrogation window of third pass should be smaller than that of second pass"))
        return false;
    return true;
}

/**
 * Shared validity checks for a single PIV pass. {@code prevWin == 0} skips
 * the must-shrink check (used for the first pass).
 */
private boolean passParamsOk(int win, int sw, int vec, int prevWin, String shrinkMsg) {
    if (prevWin > 0 && win >= prevWin) {
        IJ.error(shrinkMsg);
        return false;
    }
    if (this.xc && !powerOf2Size(win)) {
        IJ.error("PIV using conventional cross-correlation need the window size to be power of 2");
        return false;
    }
    if (win > sw) {
        IJ.error("Search window must be larger than interrogation window");
        return false;
    }
    if (vec > win) {
        IJ.error("PIV vector spacing must be smaller or equal to interrogation window size");
        return false;
    }
    return true;
}
/**
 * Returns true when the given size is a power of two no smaller than 2.
 * (1 is rejected, matching the original doubling loop, which started at 2.)
 *
 * FIX: the original loop {@code for (i = 2; i < n; i *= 2)} overflowed int
 * and spun forever for inputs above 2^30; the bit test below is equivalent
 * for every valid window size and cannot loop.
 */
private boolean powerOf2Size(int paramInt) {
    return paramInt >= 2 && (paramInt & (paramInt - 1)) == 0;
}
/**
 * Runs one PIV pass over the two-slice stack.
 *
 * For every grid node it correlates an interrogation window of slice 1
 * (reference) against a search window of slice 2 (target) — either by FHT
 * cross-correlation (xc mode) or by cvMatchTemplate — locates up to two
 * correlation peaks, refines them with a Gaussian fit, and selects the
 * vector (optionally guided by the previous pass' interpolated prior).
 *
 * @param paramImagePlus the 2-slice stack
 * @param winSize        interrogation window size (original paramInt1)
 * @param vecSpacing     vector grid spacing (original paramInt2)
 * @param searchSize     search window size (original paramInt3)
 * @param piv0           previous-pass data as a 3-D grid; length==1 is the
 *                       "no prior / first pass" sentinel
 * @return one row per grid node: [0-1] x,y; [2-4] chosen dx,dy,|d|;
 *         [5] angle-check value; [6] peak value; [7-11] same for the
 *         rejected peak; [12-14] prior dx,dy,|d|; [15] flag
 *         (21 = second peak chosen, -1 = invalid/interpolation needed)
 */
double[][] doPIV(ImagePlus paramImagePlus, int winSize, int vecSpacing, int searchSize, double[][][] piv0) {
    ImageStack stack = paramImagePlus.getStack();
    ImageProcessor refIp = stack.getProcessor(1); // reference slice
    ImageProcessor tarIp = stack.getProcessor(2); // target slice
    int[] maxPos = new int[6];          // peak positions from findMaxA/findMaxC
    double[] fit1 = new double[2];      // Gaussian-fit location of peak 1
    double[] fit2 = new double[2];      // Gaussian-fit location of peak 2
    // Displacements: (dx0,dy0,mag0) = prior; (dx1,dy1,mag1) = peak 1;
    // (dx2,dy2,mag2) = peak 2. (Originally d1..d9.)
    double dx0 = 0.0D, dy0 = 0.0D, dx1 = 0.0D, dy1 = 0.0D, dx2 = 0.0D, dy2 = 0.0D;
    double mag0 = 0.0D, mag1 = 0.0D, mag2 = 0.0D;
    boolean notEdge = true;
    boolean firstPass = (piv0.length == 1); // sentinel: no prior supplied
    int shiftX = 0, shiftY = 0;             // integer pre-shift of the search window (xc mode)
    int margin = winSize / 4;
    // FIX: these counters were 'byte' and overflowed past 127 vectors.
    int interpCount = 0;   // vectors flagged invalid / to be interpolated
    int lowCorrCount = 0;  // vectors whose peak value is below cThr
    this.nx = (this.width - margin * 2 - winSize) / vecSpacing + 1;
    this.ny = (this.height - margin * 2 - winSize) / vecSpacing + 1;
    double[][] result = new double[this.nx * this.ny][16];
    if (this.db) {
        IJ.log("nx=" + this.nx);
        IJ.log("ny=" + this.ny);
    }
    // FIX: grid indices were 'byte' and overflowed for grids wider/taller than 127.
    for (int iy = 0; iy < this.ny; iy++) {
        for (int ix = 0; ix < this.nx; ix++) {
            FloatProcessor corr;  // correlation map of this node
            int oriX, oriY;       // zero-displacement origin inside the map (originally m, n)
            int row = iy * this.nx + ix;
            IJ.showProgress(row, this.nx * this.ny);
            result[row][0] = (margin + winSize / 2 + vecSpacing * ix);
            result[row][1] = (margin + winSize / 2 + vecSpacing * iy);
            if (!firstPass) {
                // Interpolate the previous pass at this node as the prior.
                double[] prior = lerpData(result[row][0], result[row][1], piv0);
                dx0 = prior[0];
                dy0 = prior[1];
                mag0 = Math.sqrt(prior[0] * prior[0] + prior[1] * prior[1]);
                result[row][12] = dx0;
                result[row][13] = dy0;
                result[row][14] = mag0;
            }
            if (this.xc) {
                // xc mode pre-shifts the search window by the integer prior.
                shiftX = (int)dx0;
                shiftY = (int)dy0;
            } else {
                shiftX = 0;
                shiftY = 0;
            }
            // Search window in the target slice, clamped to the image.
            int sy0 = margin + iy * vecSpacing - (searchSize - winSize) / 2 + shiftY;
            int sx0 = margin + ix * vecSpacing - (searchSize - winSize) / 2 + shiftX;
            if (sy0 < 0)
                sy0 = 0;
            if (sx0 < 0)
                sx0 = 0;
            if (sy0 + searchSize > this.height)
                sy0 = this.height - searchSize;
            if (sx0 + searchSize > this.width)
                sx0 = this.width - searchSize;
            tarIp.setRoi(sx0, sy0, searchSize, searchSize);
            // Interrogation window in the reference slice, clamped to the margin.
            int wx0 = margin + ix * vecSpacing;
            int wy0 = margin + iy * vecSpacing;
            if (wy0 + winSize > this.height - margin)
                wy0 = this.height - margin - winSize;
            if (wx0 + winSize > this.width - margin)
                wx0 = this.width - margin - winSize;
            refIp.setRoi(wx0, wy0, winSize, winSize);
            // Edge windows (or nearly-equal window/search sizes) get the more
            // conservative single-peak treatment in findMax*.
            if (wy0 == 0 || wx0 == 0 || wy0 == this.height - winSize || wx0 == this.width - winSize) {
                notEdge = false;
            } else if (searchSize - winSize < 20) {
                notEdge = false;
            } else {
                notEdge = true;
            }
            if (this.xc) {
                // Conventional cross-correlation via the Fast Hartley Transform.
                FHT refFht = new FHT(refIp.crop());
                FHT tarFht = new FHT(tarIp.crop());
                refFht.transform();
                tarFht.transform();
                FHT corrFht = tarFht.conjugateMultiply(refFht);
                corrFht.inverseTransform();
                corrFht.swapQuadrants();
                // FIX: the decompiled code stored this into an unused local
                // ("FHT fHT1 = fHT4;"), leaving the correlation map variable
                // unassigned on this path. FHT extends FloatProcessor, so the
                // result is usable directly.
                corr = corrFht;
                oriX = winSize / 2;
                oriY = winSize / 2;
            } else {
                corr = cvMatchTemplate.doMatch(tarIp.crop(), refIp.crop(), 5, false);
                oriX = margin + ix * vecSpacing - sx0;
                oriY = margin + iy * vecSpacing - sy0;
            }
            // Debug probe: show the windows and the correlation map at (dbX, dbY).
            if (margin + winSize / 2 + vecSpacing * ix == this.dbX
                    && margin + winSize / 2 + vecSpacing * iy == this.dbY) {
                IJ.log("position: " + result[row][0] + "," + result[row][1]);
                IJ.log("edge:" + notEdge);
                (new ImagePlus("Match result", (ImageProcessor)corr)).show();
                (new ImagePlus("tar_" + this.dbX + "," + this.dbY, tarIp.crop())).show();
                (new ImagePlus("ref_" + this.dbX + "," + this.dbY, refIp.crop())).show();
            }
            if ((corr.getStatistics()).stdDev == 0.0D) {
                // Flat correlation map (featureless region): emit a zero vector.
                for (int c = 2; c <= 11; c++)
                    result[row][c] = 0.0D;
                result[row][15] = 0.0D;
            } else {
                int[] ori = { oriX, oriY };
                int[] nodePos = { (int)result[row][0], (int)result[row][1] };
                // 16-bit copy used by the peak finder / Gaussian fit.
                ImageProcessor corr16 = corr.convertToShort(true);
                if (!firstPass) {
                    // ---- prior-guided pass ----
                    double p1, p2;                    // peak values (originally d10, d11)
                    double[] chk1 = new double[2];    // checkVector() result for peak 1
                    double[] chk2 = new double[2];    // checkVector() result for peak 2
                    if (this.db) {
                        IJ.log("position: " + nodePos[0] + "," + nodePos[1]);
                        IJ.log("dx0, dy0: " + dx0 + "," + dy0);
                        IJ.log("xyOri: " + ori[0] + "," + ori[1]);
                    }
                    if (this.xc) {
                        maxPos = findMaxA(corr16, notEdge);
                        if (maxPos[0] == -999 && maxPos[1] == -999 && maxPos[2] == -999 && maxPos[3] == -999) {
                            IJ.log("no maximum found at:");
                            IJ.log("position: " + nodePos[0] + "," + nodePos[1]);
                        }
                    } else {
                        double[] priorVec = { dx0, dy0 };
                        maxPos = findMaxC((ImageProcessor)corr, notEdge, priorVec, ori, nodePos);
                    }
                    int choice = 1; // 0 = take peak 2, 1 = take peak 1, 2 = invalid
                    if (maxPos[0] == -999) {
                        // No usable maximum: mark invalid.
                        fit1[0] = -999.0D;
                        fit1[1] = -999.0D;
                        fit2[0] = -999.0D;
                        fit2[1] = -999.0D;
                        choice = 2;
                        p1 = 0.0D;
                        p2 = 0.0D;
                    } else {
                        if (maxPos[0] == maxPos[2] && maxPos[1] == maxPos[3]) {
                            // Single dominant peak.
                            fit1 = gaussianPeakFit(corr16, maxPos[0], maxPos[1]);
                            if (this.db) {
                                IJ.log("dxdyG[0]:" + fit1[0]);
                                IJ.log("dxdyG[1]:" + fit1[1]);
                            }
                            fit2 = fit1;
                            dx1 = fit1[0] - oriX;
                            dy1 = fit1[1] - oriY;
                            if (this.xc) {
                                // Add back the integer pre-shift of the search window.
                                dx1 += shiftX;
                                dy1 += shiftY;
                            }
                            dx2 = dx1;
                            dy2 = dy1;
                            mag1 = Math.sqrt(dx1 * dx1 + dy1 * dy1);
                            mag2 = mag1;
                            chk1 = checkVector(dx1, dy1, mag1, dx0, dy0, mag0);
                            chk2 = chk1;
                            p1 = corr.getf(maxPos[0], maxPos[1]);
                            p2 = p1;
                            if (this.noChkPeak) {
                                choice = checkThr(p1);
                            } else {
                                choice = checkPeakB1(p1, mag1, chk1);
                            }
                        } else {
                            // Two candidate peaks.
                            fit1 = gaussianPeakFit(corr16, maxPos[0], maxPos[1]);
                            fit2 = gaussianPeakFit(corr16, maxPos[2], maxPos[3]);
                            if (this.db) {
                                IJ.log("dxdyG[0]:" + fit1[0]);
                                IJ.log("dxdyG[1]:" + fit1[1]);
                                IJ.log("dxdyG2[0]:" + fit2[0]);
                                IJ.log("dxdyG2[1]:" + fit2[1]);
                            }
                            dx1 = fit1[0] - oriX;
                            dy1 = fit1[1] - oriY;
                            dx2 = fit2[0] - oriX;
                            dy2 = fit2[1] - oriY;
                            if (this.xc) {
                                dx1 += shiftX;
                                dy1 += shiftY;
                                dx2 += shiftX;
                                dy2 += shiftY;
                            }
                            mag1 = Math.sqrt(dx1 * dx1 + dy1 * dy1);
                            mag2 = Math.sqrt(dx2 * dx2 + dy2 * dy2);
                            chk1 = checkVector(dx1, dy1, mag1, dx0, dy0, mag0);
                            chk2 = checkVector(dx2, dy2, mag2, dx0, dy0, mag0);
                            p1 = corr.getf(maxPos[0], maxPos[1]);
                            p2 = corr.getf(maxPos[2], maxPos[3]);
                            // Note: in xc mode the two-peak check is skipped
                            // (choice stays 1), matching the original flow.
                            if (!this.xc)
                                if (this.noChkPeak) {
                                    choice = checkThr(p1);
                                } else if (this.chkPeakA) {
                                    choice = checkPeakA(p1, p2, mag1, mag2, chk1, chk2);
                                } else {
                                    choice = checkPeakB(p1, p2, mag1, mag2, chk1, chk2);
                                }
                        }
                        if (this.db) {
                            IJ.log("dx1: " + dx1);
                            IJ.log("dy1: " + dy1);
                            IJ.log("dx2: " + dx2);
                            IJ.log("dy2: " + dy2);
                            IJ.log("dx0: " + dx0);
                            IJ.log("dy0: " + dy0);
                            IJ.log("mag0: " + mag0);
                        }
                        if (p1 < cThr)
                            lowCorrCount++;
                    }
                    if (this.db) {
                        IJ.log("ang1: " + chk1[0]);
                        IJ.log("ang2: " + chk2[0]);
                        IJ.log("p1: " + p1);
                        IJ.log("p2: " + p2);
                        IJ.log("dL1: " + chk1[1]);
                        IJ.log("dL2: " + chk2[1]);
                        IJ.log("dL2-dL1: " + (Math.abs(chk2[1]) - Math.abs(chk1[1])));
                        IJ.log("Choice:" + choice);
                    }
                    switch (choice) {
                        case 0: // second peak wins; flag 21 records the swap
                            result[row][2] = dx2;
                            result[row][3] = dy2;
                            result[row][4] = mag2;
                            result[row][5] = chk2[0];
                            result[row][6] = p2;
                            result[row][7] = dx1;
                            result[row][8] = dy1;
                            result[row][9] = mag1;
                            result[row][10] = chk1[0];
                            result[row][11] = p1;
                            result[row][15] = 21.0D;
                            break;
                        case 1: // first peak wins
                            result[row][2] = dx1;
                            result[row][3] = dy1;
                            result[row][4] = mag1;
                            result[row][5] = chk1[0];
                            result[row][6] = p1;
                            result[row][7] = dx2;
                            result[row][8] = dy2;
                            result[row][9] = mag2;
                            result[row][10] = chk2[0];
                            result[row][11] = p2;
                            break;
                        case 2: // invalid vector; flagged for interpolation
                            result[row][2] = dx1;
                            result[row][3] = dy1;
                            result[row][4] = mag1;
                            result[row][5] = chk1[0];
                            result[row][6] = p1;
                            result[row][7] = dx2;
                            result[row][8] = dy2;
                            result[row][9] = mag2;
                            result[row][10] = chk2[0];
                            result[row][11] = p2;
                            result[row][15] = -1.0D;
                            interpCount++;
                            break;
                    }
                } else {
                    // ---- first pass (no prior) ----
                    if (this.db) {
                        IJ.log("position: " + nodePos[0] + "," + nodePos[1]);
                        IJ.log("dx0, dy0: " + dx0 + "," + dy0);
                        IJ.log("xyOri: " + ori[0] + "," + ori[1]);
                    }
                    maxPos = findMaxA((ImageProcessor)corr, notEdge);
                    if (maxPos[0] == -999 && maxPos[1] == -999) {
                        fit1 = new double[] { -999.0D, -999.0D };
                        fit2 = new double[] { -999.0D, -999.0D };
                        dx1 = 0.0D;
                        dy1 = 0.0D;
                        dx2 = 0.0D;
                        dy2 = 0.0D;
                        mag1 = 0.0D;
                        mag2 = 0.0D;
                    } else {
                        fit1 = gaussianPeakFit(corr16, maxPos[0], maxPos[1]);
                        fit2 = gaussianPeakFit(corr16, maxPos[2], maxPos[3]);
                        dx1 = fit1[0] - oriX;
                        dy1 = fit1[1] - oriY;
                        dx2 = fit2[0] - oriX;
                        dy2 = fit2[1] - oriY;
                        mag1 = Math.sqrt(dx1 * dx1 + dy1 * dy1);
                        mag2 = Math.sqrt(dx2 * dx2 + dy2 * dy2);
                    }
                    int corrW = corr.getWidth();
                    int corrH = corr.getHeight();
                    if (maxPos[0] == -999 && maxPos[1] == -999) {
                        // No maximum: zero vector flagged for interpolation.
                        result[row][2] = dx1;
                        result[row][3] = dy1;
                        result[row][4] = mag1;
                        result[row][5] = 0.0D;
                        result[row][6] = -1.0D;
                        result[row][7] = dx2;
                        result[row][8] = dy2;
                        result[row][9] = mag2;
                        result[row][10] = 0.0D;
                        result[row][11] = -1.0D;
                        interpCount++;
                    } else if (Math.abs(fit1[0] - maxPos[0]) < (corrW / 2)
                            && Math.abs(fit1[1] - maxPos[1]) < (corrH / 2)) {
                        // Gaussian fit stayed near the detected peak: accept.
                        result[row][2] = dx1;
                        result[row][3] = dy1;
                        result[row][4] = mag1;
                        result[row][5] = 0.0D;
                        result[row][6] = corr.getf(maxPos[0], maxPos[1]);
                        result[row][7] = dx2;
                        result[row][8] = dy2;
                        result[row][9] = mag2;
                        result[row][10] = 0.0D;
                        result[row][11] = corr.getf(maxPos[2], maxPos[3]);
                    } else {
                        // Fit wandered too far: keep values but flag invalid.
                        result[row][2] = dx1;
                        result[row][3] = dy1;
                        result[row][4] = mag1;
                        result[row][5] = 0.0D;
                        result[row][6] = corr.getf(maxPos[0], maxPos[1]);
                        result[row][7] = dx2;
                        result[row][8] = dy2;
                        result[row][9] = mag2;
                        result[row][10] = 0.0D;
                        result[row][11] = corr.getf(maxPos[2], maxPos[3]);
                        result[row][15] = -1.0D;
                        interpCount++;
                    }
                }
            }
        }
    }
    IJ.log("#interpolated vector / #total vector = " + interpCount + "/" + (this.nx * this.ny));
    IJ.log("#vector with corr. value lower than threshold / #total vector = " + lowCorrCount + "/" + (this.nx * this.ny));
    return result;
}
/**
 * Heuristic quality check for the two strongest correlation peaks (variant A,
 * used during the first PIV pass). The thresholds combine the global
 * correlation-map statistics (meanR/sdR, set by findMaxC) with the fitted
 * displacement lengths of both peaks.
 *
 * @param peak1 correlation value of the strongest peak
 * @param peak2 correlation value of the second peak
 * @param len1  displacement magnitude associated with peak 1
 * @param len2  displacement magnitude associated with peak 2
 * @param disp1 sub-pixel fit of peak 1 (index 0 and 1 used)
 * @param disp2 sub-pixel fit of peak 2 (index 0 used)
 * @return 0 to prefer the secondary peak, 1 to keep the primary peak,
 *         2 to reject the vector (flagged for later interpolation)
 */
private int checkPeakA(double peak1, double peak2, double len1, double len2, double[] disp1, double[] disp2) {
    int verdict = 1;
    double peakRatio = peak1 / peak2;
    double refLen = len1 - disp1[1];
    if (peakRatio > 1.5D && peak1 > this.meanR + 2.0D * this.sdR) {
        // clearly dominant first peak well above the noise floor
        verdict = 1;
    } else if (peak1 > this.meanR + 3.0D * this.sdR && disp1[0] < 20.0D && disp2[0] < 20.0D) {
        // very strong first peak with both fits in a plausible range
        verdict = 1;
    } else if (peak1 > this.meanR + 2.0D * this.sdR && disp1[0] < 20.0D && disp2[0] < 20.0D && Math.abs(disp1[0] - disp2[0]) < 5.0D) {
        // two nearby, similar peaks: decide by comparing each displacement
        // length against the reference length
        if (len1 >= refLen * 0.8D && len1 / refLen < 3.0D) {
            verdict = 1;
        } else if (len2 >= refLen * 0.8D && len2 / refLen < 3.0D) {
            verdict = 0;
        } else {
            verdict = 2;
        }
    } else if (peak1 < cThr) {
        // below the correlation threshold: reject outright
        verdict = 2;
    } else if (peak1 - peak2 < 0.1D || peak1 / peak2 < 1.2D) {
        // ambiguous peak pair: the secondary peak wins only when its fit is
        // far from peak 1's and its length agrees with the reference
        if (disp1[0] - disp2[0] > 90.0D && len2 / refLen < 3.0D && len2 / refLen > 0.33D) {
            verdict = 0;
        } else if (disp1[0] - disp2[0] > 30.0D && len2 / refLen < 1.5D && len2 / refLen > 0.67D) {
            verdict = 0;
        }
    } else if (disp1[0] - disp2[0] > 50.0D && disp2[0] < 6.0D && len2 / refLen < 3.0D && len2 / refLen > 0.33D) {
        verdict = 0;
    }
    return verdict;
}
/**
 * Heuristic quality check for the peak pair (variant B, used on later
 * iterations). Same return convention as checkPeakA: 0 = take secondary
 * peak, 1 = keep primary peak, 2 = reject the vector.
 */
private int checkPeakB(double peak1, double peak2, double len1, double len2, double[] disp1, double[] disp2) {
    int verdict = 1;
    double refLen = len1 - disp1[1];
    if (peak1 < cThr) {
        // correlation below the user threshold: reject
        verdict = 2;
    } else if (disp1[0] > 30.0D && refLen > 1.0D) {
        // primary fit drifted far: fall back to peak 2 only if its length
        // is consistent with the reference, otherwise reject
        verdict = (disp2[0] < 5.0D && len2 / refLen < 2.0D && len2 / refLen > 0.5D) ? 0 : 2;
    } else if ((len1 / refLen > 2.0D || len1 / refLen < 0.5D) && refLen > 1.0D) {
        // primary length inconsistent with the reference: same fallback rule
        verdict = (disp2[0] < 5.0D && len2 / refLen < 2.0D && len2 / refLen > 0.5D) ? 0 : 2;
    }
    return verdict;
}
/**
 * Reduced peak check used when only a single peak is considered.
 * Returns 2 (reject) when the correlation is below threshold AND the
 * displacement looks implausible; returns 1 (accept) otherwise.
 */
private int checkPeakB1(double peak, double len, double[] disp) {
    int verdict = 1;
    double refLen = len - disp[1];
    // The original expressed this as two independent ifs; both required
    // peak < cThr and len > 1, so they fold into a single disjunction.
    if (peak < cThr && len > 1.0D && (len / refLen > 5.0D || disp[0] > 60.0D))
        verdict = 2;
    return verdict;
}
/**
 * Classifies a correlation value against the user threshold.
 *
 * @return 2 (reject) when below {@code cThr}, 1 (accept) otherwise
 */
private int checkThr(double corr) {
    return (corr < cThr) ? 2 : 1;
}
/**
 * Dumps the whole PIV table to the ImageJ log, one row per vector with all
 * 16 columns tab-prefixed (same output format as before).
 *
 * Fix: the decompiled original used a {@code byte} loop counter, which wraps
 * at 127 and breaks for grids with more than 127 vectors (nx*ny commonly
 * exceeds that). A StringBuilder replaces the 16-term string concatenation.
 */
private void logPIV(double[][] piv) {
    for (int i = 0; i < piv.length; i++) {
        StringBuilder row = new StringBuilder();
        for (int col = 0; col < 16; col++)
            row.append('\t').append(piv[i][col]);
        IJ.log(row.toString());
    }
}
/**
 * Locates up to the two strongest local maxima of a correlation map using
 * ImageJ's MaximumFinder, which (with output type 4 = LIST) appends the
 * X/Y coordinates of the maxima to the shared system ResultsTable.
 *
 * @param paramImageProcessor correlation map to search
 * @param paramBoolean forwarded to MaximumFinder.findMaxima
 *        (the excludeOnEdges flag, per the ImageJ API — TODO confirm)
 * @return {x1, y1, x2, y2}; the second pair duplicates the first when only
 *         one maximum exists, and all four entries are -999 when none found
 */
private int[] findMaxA(ImageProcessor paramImageProcessor, boolean paramBoolean) {
// The system ResultsTable is shared mutable state; clear it before searching.
ResultsTable resultsTable = ResultsTable.getResultsTable();
resultsTable.reset();
MaximumFinder maximumFinder = new MaximumFinder();
double d1 = (paramImageProcessor.getStatistics()).stdDev;
// NOTE(review): the mean (d2) is computed but never used in this method.
double d2 = (paramImageProcessor.getStatistics()).mean;
double d3 = d1;
byte b = 0;
// Adaptive tolerance: start at one standard deviation and halve each round
// until at least two maxima are found (max 5 rounds), falling back to up to
// 10 rounds to find at least one. -808080.0D appears to be ImageJ's
// "no threshold" sentinel (ImageProcessor.NO_THRESHOLD) — TODO confirm.
while ((resultsTable.getCounter() < 2 && b < 5) || (resultsTable.getCounter() == 0 && b < 10)) {
resultsTable.reset();
maximumFinder.findMaxima(paramImageProcessor, d3, -808080.0D, 4, paramBoolean, false);
d3 /= 2.0D;
b++;
}
int[] arrayOfInt = new int[4];
if (resultsTable.getCounter() == 1) {
// Single maximum: report it as both first and second peak.
arrayOfInt[0] = (int)resultsTable.getValue("X", 0);
arrayOfInt[1] = (int)resultsTable.getValue("Y", 0);
arrayOfInt[2] = (int)resultsTable.getValue("X", 0);
arrayOfInt[3] = (int)resultsTable.getValue("Y", 0);
} else if (resultsTable.getCounter() > 1) {
// Two or more maxima: rows 0 and 1 hold the two strongest.
arrayOfInt[0] = (int)resultsTable.getValue("X", 0);
arrayOfInt[1] = (int)resultsTable.getValue("Y", 0);
arrayOfInt[2] = (int)resultsTable.getValue("X", 1);
arrayOfInt[3] = (int)resultsTable.getValue("Y", 1);
} else {
// No maximum found within 10 rounds: -999 sentinel checked by callers.
arrayOfInt[0] = -999;
arrayOfInt[1] = -999;
arrayOfInt[2] = -999;
arrayOfInt[3] = -999;
}
return arrayOfInt;
}
/**
 * Locates the two strongest correlation peaks, biased towards an expected
 * displacement (variant C). Strategy: (1) if the correlation map has a
 * clearly dominant peak (judged by kurtosis/skewness/max statistics), accept
 * it from a whole-map search; (2) otherwise restrict the search to a window
 * around the expected position and fall back to the whole map if nothing is
 * found there. Also stores the map's mean/stdDev in the fields meanR/sdR,
 * which the checkPeak* heuristics read afterwards.
 *
 * @param paramImageProcessor correlation map to search
 * @param paramBoolean forwarded to MaximumFinder.findMaxima
 *        (the excludeOnEdges flag, per the ImageJ API — TODO confirm)
 * @param paramArrayOfdouble expected displacement (dx, dy) — presumably the
 *        pre-shift estimate from the previous pass; verify against caller
 * @param paramArrayOfint1 window origin in map coordinates (xyOri)
 * @param paramArrayOfint2 current grid position; used only for debug logging
 * @return {x1, y1, x2, y2}; second pair duplicates the first for a single
 *         dominant peak, and all entries are -999 when no peak was found
 */
private int[] findMaxC(ImageProcessor paramImageProcessor, boolean paramBoolean, double[] paramArrayOfdouble, int[] paramArrayOfint1, int[] paramArrayOfint2) {
double d7, d8;
// Shared system ResultsTable collects findMaxima output; clear it first.
ResultsTable resultsTable = ResultsTable.getResultsTable();
resultsTable.reset();
MaximumFinder maximumFinder = new MaximumFinder();
double d1 = (paramImageProcessor.getStatistics()).stdDev;
double d2 = (paramImageProcessor.getStatistics()).mean;
// Publish the map statistics for the checkPeak* quality heuristics.
this.sdR = d1;
this.meanR = d2;
double d3 = (paramImageProcessor.getStatistics()).max;
double d4 = (paramImageProcessor.getStatistics()).kurtosis;
double d5 = (paramImageProcessor.getStatistics()).skewness;
// NOTE(review): d6 is assigned but never used in this method.
double d6 = d1;
int[] arrayOfInt = new int[4];
// k = half the map width; also used as the size of the restricted ROI below.
int k = paramImageProcessor.getWidth() / 2;
// Phase 1: "peaky" map shortcut — high kurtosis, or moderate kurtosis with
// near-symmetric skew, or a maximum more than 4 sigma above the mean.
if (d4 > 6.0D || (d4 > 3.0D && d5 > -0.05D) || (d3 - d2) / d1 > 4.0D) {
maximumFinder.findMaxima(paramImageProcessor, d1 / 2.0D, d2 + 2.0D * d1, 4, paramBoolean, false);
if (resultsTable.getCounter() > 1) {
arrayOfInt[0] = (int)resultsTable.getValue("X", 0);
arrayOfInt[1] = (int)resultsTable.getValue("Y", 0);
arrayOfInt[2] = (int)resultsTable.getValue("X", 1);
arrayOfInt[3] = (int)resultsTable.getValue("Y", 1);
d7 = paramImageProcessor.getf(arrayOfInt[0], arrayOfInt[1]);
d8 = paramImageProcessor.getf(arrayOfInt[2], arrayOfInt[3]);
double d10 = d7 - d8;
double d11 = d7 / d8;
double d12 = d10 / d1;
// Peak 1 dominates peak 2 (height gap > 2 sigma or ratio > 2):
// return peak 1 duplicated and skip the windowed search entirely.
if (d12 > 2.0D || d11 > 2.0D) {
if (this.db) {
IJ.log("Z1: significant peak found. curPos = " + paramArrayOfint2[0] + "," + paramArrayOfint2[1]);
IJ.log("peakH: " + d12);
IJ.log("p/p: " + d11);
IJ.log("X: " + arrayOfInt[0]);
IJ.log("Y: " + arrayOfInt[1]);
}
arrayOfInt[2] = arrayOfInt[0];
arrayOfInt[3] = arrayOfInt[1];
return arrayOfInt;
}
} else if (resultsTable.getCounter() == 1) {
// Exactly one strong maximum: accept it immediately.
if (this.db) {
IJ.log("Z3: significant peak found. curPos = " + paramArrayOfint2[0] + "," + paramArrayOfint2[1]);
IJ.log("kurt= " + d4);
IJ.log("skew= " + d5);
IJ.log("X: " + resultsTable.getValue("X", 0));
IJ.log("Y: " + resultsTable.getValue("Y", 0));
}
arrayOfInt[0] = (int)resultsTable.getValue("X", 0);
arrayOfInt[1] = (int)resultsTable.getValue("Y", 0);
arrayOfInt[2] = (int)resultsTable.getValue("X", 0);
arrayOfInt[3] = (int)resultsTable.getValue("Y", 0);
return arrayOfInt;
}
}
// Phase 2: restrict the search to a k-by-k ROI centred on the expected
// displacement, clamping the shift to +/- k/4 (integer division; b = 4).
int i = (int)((paramArrayOfint1[0] - Math.round((k / 2))) + Math.round(paramArrayOfdouble[0]));
int j = (int)((paramArrayOfint1[1] - Math.round((k / 2))) + Math.round(paramArrayOfdouble[1]));
byte b = 4;
if (paramArrayOfdouble[0] < 0.0D) {
d7 = (paramArrayOfdouble[0] * -1.0D < (k / b)) ? paramArrayOfdouble[0] : (-k / b);
} else {
d7 = (paramArrayOfdouble[0] < (k / b)) ? paramArrayOfdouble[0] : (k / b);
}
if (paramArrayOfdouble[1] < 0.0D) {
d8 = (paramArrayOfdouble[1] * -1.0D < (k / b)) ? paramArrayOfdouble[1] : (-k / b);
} else {
d8 = (paramArrayOfdouble[1] < (k / b)) ? paramArrayOfdouble[1] : (k / b);
}
// NOTE(review): the clamped shift is added on top of the rounded expected
// displacement already folded into i/j above — i.e. applied twice; confirm
// whether this double application is intentional.
i = (int)(i + d7);
j = (int)(j + d8);
paramImageProcessor.setRoi(i, j, k, k);
// ROI-local standard deviation, used for the secondary peak-height test.
double d9 = (paramImageProcessor.getStatistics()).stdDev;
resultsTable.reset();
// Fine-tolerance search inside the pre-shift window.
maximumFinder.findMaxima(paramImageProcessor, d1 / 20.0D, d2, 4, paramBoolean, false);
if (resultsTable.getCounter() == 0) {
// Nothing in the window: fall back to a stricter whole-map search.
paramImageProcessor.resetRoi();
resultsTable.reset();
maximumFinder.findMaxima(paramImageProcessor, d1 / 10.0D, d2 + 2.0D * d1, 4, paramBoolean, false);
if (resultsTable.getCounter() == 0) {
if (this.db) {
IJ.log("no significant peak found. curPos = " + paramArrayOfint2[0] + "," + paramArrayOfint2[1]);
IJ.log("limitX: " + i);
IJ.log("limitY: " + j);
IJ.log("tolerance: " + (d1 / 10.0D));
IJ.log("threshold: " + (d2 + 2.0D * d1));
}
// -999 sentinel: caller marks this vector as missing/interpolated.
arrayOfInt[0] = -999;
arrayOfInt[1] = -999;
arrayOfInt[2] = -999;
arrayOfInt[3] = -999;
} else {
if (this.db) {
IJ.log("A1: one peak found in the whole map. curPos = " + paramArrayOfint2[0] + "," + paramArrayOfint2[1]);
IJ.log("limitX: " + i);
IJ.log("limitY: " + j);
IJ.log("tolerance: " + (d1 / 10.0D));
}
arrayOfInt[0] = (int)resultsTable.getValue("X", 0);
arrayOfInt[1] = (int)resultsTable.getValue("Y", 0);
arrayOfInt[2] = (int)resultsTable.getValue("X", 0);
arrayOfInt[3] = (int)resultsTable.getValue("Y", 0);
}
} else {
// Phase 3: peaks found inside the window — take the top two and decide
// whether peak 1 is dominant enough to stand alone.
if (this.db) {
IJ.log("B1: peaks found in the preshift window. curPos = " + paramArrayOfint2[0] + "," + paramArrayOfint2[1]);
IJ.log("limitX: " + i);
IJ.log("limitY: " + j);
IJ.log("tolerance: " + (d1 / 20.0D));
for (byte b1 = 0; b1 < resultsTable.getCounter() &&
b1 < 2; b1++) {
IJ.log("X: " + resultsTable.getValue("X", b1));
IJ.log("Y: " + resultsTable.getValue("Y", b1));
}
}
arrayOfInt[0] = (int)resultsTable.getValue("X", 0);
arrayOfInt[1] = (int)resultsTable.getValue("Y", 0);
if (resultsTable.getCounter() > 1) {
arrayOfInt[2] = (int)resultsTable.getValue("X", 1);
arrayOfInt[3] = (int)resultsTable.getValue("Y", 1);
} else {
arrayOfInt[2] = arrayOfInt[0];
arrayOfInt[3] = arrayOfInt[1];
}
double d10 = paramImageProcessor.getf(arrayOfInt[0], arrayOfInt[1]);
double d11 = paramImageProcessor.getf(arrayOfInt[2], arrayOfInt[3]);
double d12 = d10 - d11;
double d13 = d10 / d11;
// Peak-height gap normalised by the global (d14) and ROI-local (d15) sigma.
double d14 = d12 / d1;
double d15 = d12 / d9;
if (this.db) {
IJ.log("peakH= " + d14);
IJ.log("peakH2= " + d15);
IJ.log("pRatio= " + d13);
}
if (d10 > 0.5D && ((d14 + d15 > 2.0D && d13 > 2.0D) || d14 + d15 > 3.0D)) {
if (this.db)
IJ.log("peak1 muck significant than peak2");
arrayOfInt[2] = arrayOfInt[0];
arrayOfInt[3] = arrayOfInt[1];
} else if ((d10 <= 0.5D && d13 > 2.0D) || d14 + d15 > 4.0D) {
if (this.db) {
IJ.log("peak1 2 times higher than peak2");
IJ.log("peak1: " + d10);
}
arrayOfInt[2] = arrayOfInt[0];
arrayOfInt[3] = arrayOfInt[1];
}
}
return arrayOfInt;
}
/**
 * Sub-pixel peak localisation by a three-point Gaussian fit in each axis:
 * offset = (ln f(-1) - ln f(+1)) / (2 ln f(-1) - 4 ln f(0) + 2 ln f(+1)),
 * applied independently in x and y around the integer peak (paramInt1,
 * paramInt2). Falls back to the integer position for border peaks or a
 * degenerate (NaN/Infinite) fit.
 *
 * @return {xSubPixel, ySubPixel}
 */
private double[] gaussianPeakFit(ImageProcessor paramImageProcessor, int paramInt1, int paramInt2) {
double[] arrayOfDouble = new double[2];
double d1 = 0.0D, d2 = 0.0D, d3 = 0.0D;
// Border peaks have no neighbour on one side: no fit possible.
if (paramInt1 == 0 || paramInt1 == paramImageProcessor
.getWidth() - 1 || paramInt2 == 0 || paramInt2 == paramImageProcessor
.getHeight() - 1) {
arrayOfDouble[0] = paramInt1;
arrayOfDouble[1] = paramInt2;
} else {
// Horizontal fit. Zero-valued pixels skip the log to avoid log(0);
// d1/d2/d3 then keep their defaults of 0.
// NOTE(review): getPixel returns int — for float processors ImageJ
// returns the raw float bits, not the intensity; confirm the processor
// type this is called with.
if (paramImageProcessor.getPixel(paramInt1 - 1, paramInt2) != 0)
d1 = Math.log(paramImageProcessor.getPixel(paramInt1 - 1, paramInt2));
if (paramImageProcessor.getPixel(paramInt1, paramInt2) != 0)
d2 = Math.log(paramImageProcessor.getPixel(paramInt1, paramInt2));
if (paramImageProcessor.getPixel(paramInt1 + 1, paramInt2) != 0)
d3 = Math.log(paramImageProcessor.getPixel(paramInt1 + 1, paramInt2));
arrayOfDouble[0] = paramInt1 + (d1 - d3) / (2.0D * d1 - 4.0D * d2 + 2.0D * d3);
if (Double.isNaN(arrayOfDouble[0]) || Double.isInfinite(arrayOfDouble[0]))
arrayOfDouble[0] = paramInt1;
// Vertical fit, reusing d2 (the centre-pixel log is shared by both axes).
// NOTE(review): if a vertical neighbour is 0, d1/d3 silently retain the
// *horizontal* fit's log values rather than being reset to 0 — this looks
// unintended; confirm before relying on vertical sub-pixel accuracy.
if (paramImageProcessor.getPixel(paramInt1, paramInt2 - 1) != 0)
d1 = Math.log(paramImageProcessor.getPixel(paramInt1, paramInt2 - 1));
if (paramImageProcessor.getPixel(paramInt1, paramInt2 + 1) != 0)
d3 = Math.log(paramImageProcessor.getPixel(paramInt1, paramInt2 + 1));
arrayOfDouble[1] = paramInt2 + (d1 - d3) / (2.0D * d1 - 4.0D * d2 + 2.0D * d3);
if (Double.isNaN(arrayOfDouble[1]) || Double.isInfinite(arrayOfDouble[1]))
arrayOfDouble[1] = paramInt2;
}
return arrayOfDouble;
}
/**
 * Normalised median test over the vector grid: for each vector, each of the
 * two displacement components (columns 2 and 3) is compared with the median
 * of its valid 8-neighbourhood; the residual is normalised by the median of
 * the neighbours' own residuals plus {@code noise}. Vectors whose normalised
 * residual exceeds {@code threshold}, or that have no valid neighbours, are
 * flagged -1 in column 15 (in place).
 *
 * @param piv       vector grid, one row per grid point; modified in place
 * @param noise     noise level added to the residual denominator
 * @param threshold rejection threshold for the normalised residual
 * @return the same (mutated) array
 */
double[][] normalizedMedianTest(double[][] piv, double noise, double threshold) {
    final int flagCol = 15;
    // Fix: int loop counter — the decompiled byte counter wraps at 127 and
    // breaks on grids with more than 127 vectors (nx*ny commonly exceeds it).
    for (int i = 0; i < piv.length; i++) {
        for (int comp = 2; comp < 4; comp++) {
            double[] neighbours = getNeighbours(piv, i, comp, flagCol);
            if (neighbours == null) {
                // no valid neighbour at all: cannot validate, flag as outlier
                piv[i][flagCol] = -1.0D;
            } else {
                double norm = Math.abs(piv[i][comp] - getMedian(neighbours))
                        / (getMedian(getResidualsOfMedian(neighbours)) + noise);
                if (norm > threshold)
                    piv[i][flagCol] = -1.0D;
            }
        }
    }
    return piv;
}
/**
 * Dynamic mean test: each displacement component (columns 2 and 3) is
 * compared with the mean of its valid 8-neighbourhood; the vector is flagged
 * -1 in column 15 when the deviation exceeds c1 + c2 * spread, where spread
 * is calcStd's mean squared deviation of the neighbours, or when no valid
 * neighbour exists. Modifies {@code piv} in place.
 *
 * @param piv vector grid, one row per grid point
 * @param c1  constant part of the acceptance limit
 * @param c2  spread-proportional part of the acceptance limit
 * @return the same (mutated) array
 */
double[][] dynamicMeanTest(double[][] piv, double c1, double c2) {
    final int flagCol = 15;
    // Fix: int loop counter — the decompiled byte counter overflows for
    // grids with more than 127 vectors.
    for (int i = 0; i < piv.length; i++) {
        for (int comp = 2; comp < 4; comp++) {
            double[] neighbours = getNeighbours(piv, i, comp, flagCol);
            if (neighbours == null) {
                piv[i][flagCol] = -1.0D;
            } else {
                double mean = getMean(neighbours);
                // NOTE: calcStd returns the mean squared deviation (no sqrt);
                // the limit is calibrated against that quantity.
                double spread = calcStd(neighbours, mean);
                double limit = c1 + c2 * spread;
                if (Math.abs(piv[i][comp] - mean) > limit)
                    piv[i][flagCol] = -1.0D;
            }
        }
    }
    return piv;
}
/**
 * Collects the values of column {@code valueCol} from the valid 8-neighbours
 * of grid point {@code index} on the nx-by-ny vector grid. A neighbour is
 * valid when it lies inside the grid and its {@code flagCol} entry is not
 * the -1.0 outlier sentinel.
 *
 * @return a trimmed array of the valid neighbour values (row-major order),
 *         or {@code null} when no valid neighbour exists
 */
double[] getNeighbours(double[][] data, int index, int valueCol, int flagCol) {
    double[] collected = new double[8];
    int row = index / this.nx;
    int col = index - row * this.nx;
    int count = 0;
    // Scan the 3x3 neighbourhood row-major — the same visit order as the
    // original's eight unrolled checks — skipping the centre point.
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            if (dr == 0 && dc == 0)
                continue;
            int r = row + dr;
            int c = col + dc;
            if (r < 0 || r >= this.ny || c < 0 || c >= this.nx)
                continue;
            int neighbour = r * this.nx + c;
            if (data[neighbour][flagCol] != -1.0D)
                collected[count++] = data[neighbour][valueCol];
        }
    }
    if (count == 0)
        return null;
    double[] trimmed = new double[count];
    System.arraycopy(collected, 0, trimmed, 0, count);
    return trimmed;
}
/**
 * Second-pass variant of {@link #getNeighbours}: identical neighbourhood
 * scan, but a neighbour is skipped when its {@code flagCol} entry equals the
 * -2.0 sentinel (set by replaceByMedian for vectors that could not be
 * repaired) instead of -1.0.
 *
 * @return a trimmed array of the valid neighbour values (row-major order),
 *         or {@code null} when no valid neighbour exists
 */
double[] getNeighbours2(double[][] data, int index, int valueCol, int flagCol) {
    double[] collected = new double[8];
    int row = index / this.nx;
    int col = index - row * this.nx;
    int count = 0;
    // Same row-major 3x3 visit order as the original's unrolled checks.
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            if (dr == 0 && dc == 0)
                continue;
            int r = row + dr;
            int c = col + dc;
            if (r < 0 || r >= this.ny || c < 0 || c >= this.nx)
                continue;
            int neighbour = r * this.nx + c;
            if (data[neighbour][flagCol] != -2.0D)
                collected[count++] = data[neighbour][valueCol];
        }
    }
    if (count == 0)
        return null;
    double[] trimmed = new double[count];
    System.arraycopy(collected, 0, trimmed, 0, count);
    return trimmed;
}
/**
 * Median of the given values (mean of the two middle values for even length).
 *
 * Fix: sorts a defensive copy — the original sorted the caller's array in
 * place, silently reordering the neighbour buffers passed by the outlier
 * tests. No caller depends on the sorted order, only on the returned median.
 *
 * @param values non-empty array; not modified
 */
double getMedian(double[] values) {
    double[] sorted = values.clone();
    Arrays.sort(sorted);
    int mid = sorted.length / 2;
    if (sorted.length % 2 != 0)
        return sorted[mid];
    return (sorted[mid] + sorted[mid - 1]) / 2.0D;
}
/**
 * Arithmetic mean of {@code values} (NaN for an empty array, as before).
 *
 * Fix: enhanced-for iteration replaces the decompiled byte loop counter,
 * which would overflow for arrays longer than 127 elements.
 */
double getMean(double[] values) {
    double sum = 0.0D;
    for (double v : values)
        sum += v;
    return sum / values.length;
}
/**
 * Mean squared deviation of {@code values} about {@code center}.
 *
 * NOTE(review): despite the name this is the population variance — no square
 * root is taken. Preserved as-is because dynamicMeanTest's thresholds are
 * calibrated against this quantity.
 *
 * Fix: enhanced-for iteration replaces the decompiled byte loop counter,
 * which would overflow for arrays longer than 127 elements.
 */
double calcStd(double[] values, double center) {
    double sumSq = 0.0D;
    for (double v : values) {
        double dev = v - center;
        sumSq += dev * dev;
    }
    return sumSq / values.length;
}
/**
 * Replaces each element with its absolute deviation from the array median,
 * in place, and returns the same array (the in-place update is part of the
 * existing contract and is preserved).
 *
 * Fix: int loop counter replaces the decompiled byte counter, which would
 * overflow for arrays longer than 127 elements.
 */
double[] getResidualsOfMedian(double[] values) {
    double median = getMedian(values);
    for (int i = 0; i < values.length; i++)
        values[i] = Math.abs(values[i] - median);
    return values;
}
/**
 * Returns a copy of the vector grid in which every vector flagged -1.0 in
 * column 15 has its components (columns 2 and 3) replaced by the median of
 * its valid neighbours (flag set to 999), or zeroed with flag -2.0 when no
 * valid neighbour exists. Column 4 (magnitude) is recomputed for repaired
 * vectors. The input array is not modified.
 *
 * Fix: int loop counters — the decompiled byte counters wrap at 127 and
 * break for grids with more than 127 vectors.
 */
double[][] replaceByMedian(double[][] piv) {
    final int flagCol = 15;
    double[][] out = new double[piv.length][piv[0].length];
    for (int i = 0; i < piv.length; i++)
        System.arraycopy(piv[i], 0, out[i], 0, piv[i].length);
    for (int i = 0; i < out.length; i++) {
        if (out[i][flagCol] != -1.0D)
            continue;
        for (int comp = 2; comp <= 3; comp++) {
            // Neighbours are read from the ORIGINAL grid so replacements do
            // not cascade within this pass (matches original behaviour).
            double[] neighbours = getNeighbours(piv, i, comp, flagCol);
            if (neighbours != null) {
                out[i][comp] = getMedian(neighbours);
                out[i][flagCol] = 999.0D;
            } else {
                out[i][comp] = 0.0D;
                out[i][flagCol] = -2.0D;
            }
        }
        if (out[i][flagCol] != -2.0D)
            out[i][4] = Math.sqrt(out[i][2] * out[i][2] + out[i][3] * out[i][3]);
    }
    return out;
}
/**
 * Second repair pass: handles the vectors replaceByMedian could not fix
 * (flag -2.0 in column 15), using getNeighbours2 (which only skips -2.0
 * neighbours, so first-pass repairs count as valid). Successful repairs are
 * flagged 9999; still-unrepairable vectors are zeroed and flagged -22.
 * The input array is not modified.
 *
 * Fix: int loop counters — the decompiled byte counters wrap at 127 and
 * break for grids with more than 127 vectors.
 */
double[][] replaceByMedian2(double[][] piv) {
    final int flagCol = 15;
    double[][] out = new double[piv.length][piv[0].length];
    for (int i = 0; i < piv.length; i++)
        System.arraycopy(piv[i], 0, out[i], 0, piv[i].length);
    for (int i = 0; i < out.length; i++) {
        if (out[i][flagCol] != -2.0D)
            continue;
        for (int comp = 2; comp <= 3; comp++) {
            // Read from the ORIGINAL grid: no cascading within this pass.
            double[] neighbours = getNeighbours2(piv, i, comp, flagCol);
            if (neighbours != null) {
                out[i][comp] = getMedian(neighbours);
                out[i][flagCol] = 9999.0D;
            } else {
                out[i][comp] = 0.0D;
                out[i][flagCol] = -22.0D;
            }
        }
        if (out[i][flagCol] != -22.0D)
            out[i][4] = Math.sqrt(out[i][2] * out[i][2] + out[i][3] * out[i][3]);
    }
    return out;
}
/**
 * Compares two 2-D vectors: returns the angle between them in degrees and
 * the difference of their magnitudes.
 *
 * Fix: the decompiled original computed {@code Math.acos(d1 / d2 * d3)},
 * i.e. dot * |v| / |u| instead of dot / (|u| * |v|) — wrong whenever
 * |v| != 1 and frequently outside [-1, 1], making acos return NaN and the
 * angle collapse to 0. The corrected cosine is dot / (|u| * |v|).
 *
 * @param ux   x component of the first vector
 * @param uy   y component of the first vector
 * @param uMag magnitude of the first vector
 * @param vx   x component of the second vector
 * @param vy   y component of the second vector
 * @param vMag magnitude of the second vector
 * @return {angleDegrees, uMag - vMag}; the angle is 0 for degenerate input
 *         (e.g. a zero-length vector), matching the original's NaN handling
 */
public static double[] checkVector(double ux, double uy, double uMag, double vx, double vy, double vMag) {
    double[] result = new double[2];
    double dot = ux * vx + uy * vy;
    // cos(theta) = (u . v) / (|u| |v|)
    double angle = Math.acos(dot / (uMag * vMag));
    if (Double.isNaN(angle) || Double.isInfinite(angle)) {
        result[0] = 0.0D;
    } else {
        result[0] = angle * 180.0D / Math.PI;
    }
    result[1] = uMag - vMag;
    return result;
}
/**
 * Formats the PIV table for text output: one line per vector, columns
 * space-separated, numbers formatted US-locale with pattern
 * "###.##;-###.##" widened to up to 12 fraction digits.
 *
 * Fix: int loop counters — the decompiled byte counters wrap at 127 and
 * break for grids with more than 127 vectors. The StringBuffer return type
 * is kept for caller compatibility.
 */
private StringBuffer generatePIVToPrint(double[][] piv) {
    NumberFormat format = NumberFormat.getInstance(Locale.US);
    if (format instanceof DecimalFormat)
        ((DecimalFormat)format).applyPattern("###.##;-###.##");
    format.setMaximumFractionDigits(12);
    format.setMinimumFractionDigits(0);
    StringBuffer buffer = new StringBuffer();
    for (int row = 0; row < piv.length; row++) {
        for (int col = 0; col < piv[0].length; col++) {
            buffer.append(format.format(piv[row][col]));
            buffer.append(" ");
        }
        buffer.append("\n");
    }
    return buffer;
}
/**
 * Writes {@code content} to the file {@code dir + name}.
 *
 * Fix: try-with-resources guarantees the writer (and underlying stream) is
 * closed and flushed on every path — the original leaked the stream when an
 * exception occurred between opening and closing.
 *
 * @param dir     directory part of the target path (concatenated as-is)
 * @param name    file name appended to {@code dir}
 * @param content text to write
 * @return true on success; false on I/O error (also reported via IJ.error,
 *         as before)
 */
public boolean write2File(String dir, String name, String content) {
    try (PrintWriter writer = new PrintWriter(
            new BufferedOutputStream(new FileOutputStream(dir + name)))) {
        writer.print(content);
        return true;
    } catch (IOException ioException) {
        IJ.error("" + ioException);
        return false;
    }
}
/**
 * Interpolates components {0, 1, 2} of the gridded data at position (x, y)
 * via GridPointData.interpolate.
 *
 * @param x    sample x coordinate
 * @param y    sample y coordinate
 * @param grid data cube [rows][cols][components]
 * @return interpolated values (array of length components - 2, as filled by
 *         GridPointData.interpolate)
 */
double[] lerpData(double x, double y, double[][][] grid) {
    GridPointData gridData = new GridPointData(grid.length, (grid[0]).length, (grid[0][0]).length);
    gridData.setData(grid);
    double[] sample = new double[(grid[0][0]).length - 2];
    return gridData.interpolate(x, y, new int[] { 0, 1, 2 }, sample);
}
}
For some instances, f(x) evaluates to NaN (this happens while optimizing lambda)
Optimizing Lambda
Func-count x f(x) Procedure
1 381966 nan initial
2 618034 nan golden
3 236068 nan golden
4 472136 nan golden
5 326238 nan golden
6 416408 nan golden
7 360680 nan golden
8 395122 nan golden
9 373835 nan golden
10 386991 nan golden
11 378860 nan golden
12 383885 nan golden
13 380780 nan golden
14 382699 nan golden
15 381513 nan golden
16 382246 nan golden
17 381793 nan golden
18 382073 nan golden
19 381900 nan golden
20 382007 nan golden
21 381941 nan golden
22 381982 nan golden
23 381956 nan golden
24 381972 nan golden
25 381962 nan golden
26 381968 nan golden
27 381965 nan golden
28 381967 nan golden
29 381965 nan golden
30 381966 nan golden
31 381966 nan golden
32 381966 nan golden
33 381966 nan golden
34 381966 nan golden
35 381966 nan golden
36 381966 nan golden
37 381966 nan golden
38 381966 nan golden
39 381966 nan golden
NaN result encountered.
Time taken 0.641434907913208 s
------------------------
Do I need to make any changes in the config.yaml? I am getting the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/tmp/ipykernel_282/1756007441.py in <module>
4 config = '../config/config.yaml'
5
----> 6 traction_config = TractionForceConfig(E=E, scaling_factor=pix_per_mu, config=config)
7
8
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/core.py in __init__(self, scaling_factor, E, min_window_size, dt, s, meshsize, device, segment, config)
35 self.config = self._get_config(min_window_size, dt, E, s, meshsize, scaling_factor, segment)
36 else:
---> 37 self.config = self._config_ymal(config, min_window_size, dt, E, s, meshsize, scaling_factor)
38
39
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/core.py in _config_ymal(config, min_window_size, dt, E, s, meshsize, scaling_factor)
71 @staticmethod
72 def _config_ymal(config, min_window_size, dt, E, s, meshsize, scaling_factor):
---> 73 config['tfm']['E'] = E,
74 config['tfm']['pix_per_mu'] = scaling_factor
75 config['tfm']['meshsize'] = meshsize
TypeError: string indices must be integers
Originally posted by @l-kaplan in #44 (comment)
Poor error message raised in example3 and same error is raised for #32
multithread _process_stack method and include num_workers
arg i.e. process_stack(img, ref, num_workers=x)
. Note that the log file (either h5 or defaultdict) will need to have a lock.
The part that needs threading is
#pytraction/core.py
def _process_stack(self, img_stack, ref_stack, bead_channel=0, roi=False, frame=[], crop=False):
...
file
for frame in list(range(nframes)):
res = do_somestuff()
file.add(res)
becomes
#pytraction/core.py
def _process_stack(self, img_stack, ref_stack, bead_channel=0, roi=False, frame=[], crop=False):
...
file = p.map(do_somestuff, range(nframe))
The following scripts should be considerably faster.
from pytraction.core import TractionForce
from pytraction.utils import plot
pix_per_mu = 1.3 # The number of pixels per micron
E = 100 # Youngs modulus in Pa
img_path = 'data/example1/e01_pos1_axon1.tif'
ref_path = 'data/example1/e01_pos1_axon1_ref.tif'
traction_obj = TractionForce(pix_per_mu, E=E)
img, ref, _ = traction_obj.load_data(img_path, ref_path)
log = traction_obj.process_stack(img, ref)
plot(log, frame=0)
plt.show()
The majority of the code is missing documentation and tests.
It would be helpful if people could
To align slices, a template depth must be determined, which controls how much smaller to make the reference. It is important that this scales with input sizes. However, we're currently taking 10% of the min. Need to check dims for edge cases and write tests. This can cause major issues if the PIV is not done correctly.
def allign_slice(img, ref):
"""
:param img: Image slice to allign
:param ref: Reference bead to calculate alignment
:return: Aligned image
"""
# amount to reduce template
depth = int(min(img.shape)*0.1)
When trying to run example1.ipynb from the premade examples, calling the TractionForceConfig function causes following error :
_
FileNotFoundError Traceback (most recent call last)
Cell In[10], line 1
----> 1 traction_config = TractionForceConfig(E=E, scaling_factor=pix_per_um, config=config) # config TractionForceConfig object
2 img, ref, _ = traction_config.load_data(img_path, ref_path) # we can load the data using the load_data method
4 print(f'The expected shape of the image is {img.shape}')File ~/opt/anaconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/core.py:47, in TractionForceConfig.init(self, E, scaling_factor, config, min_window_size, meshsize, s, knn, cnn, **kwards)
41 self.config = self._config_yaml(
42 config, E, min_window_size, s, meshsize, scaling_factor
43 )
45 self.knn = self._get_knn_model() if knn else None
46 self.model, self.pre_fn = (
---> 47 self._get_cnn_model(device=self.config["settings"]["device"])
48 if cnn
49 else (None, None)
50 )
52 for k, v in kwards:
53 self.config[k] = vFile ~/opt/anaconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/core.py:92, in TractionForceConfig._get_cnn_model(device)
83 gdd.download_file_from_google_drive(
84 file_id=file_id,
85 dest_path=destination,
(...)
88 overwrite=False,
89 )
91 # currently using model from 20210316
---> 92 best_model = torch.load(f"{tmpdir}/best_model_1.pth", map_location="cpu")
93 if device == "cuda" and torch.cuda.is_available():
94 best_model = best_model.to("cuda")File ~/opt/anaconda3/envs/pytraction/lib/python3.8/site-packages/torch/serialization.py:771, in load(f, map_location, pickle_module, weights_only, **pickle_load_args)
768 if 'encoding' not in pickle_load_args.keys():
769 pickle_load_args['encoding'] = 'utf-8'
--> 771 with _open_file_like(f, 'rb') as opened_file:
772 if _is_zipfile(opened_file):
773 # The zipfile reader is going to advance the current file position.
774 # If we want to actually tail call to torch.jit.load, we need to
775 # reset back to the original position.
776 orig_position = opened_file.tell()File ~/opt/anaconda3/envs/pytraction/lib/python3.8/site-packages/torch/serialization.py:270, in _open_file_like(name_or_buffer, mode)
268 def _open_file_like(name_or_buffer, mode):
269 if _is_path(name_or_buffer):
--> 270 return _open_file(name_or_buffer, mode)
271 else:
272 if 'w' in mode:File ~/opt/anaconda3/envs/pytraction/lib/python3.8/site-packages/torch/serialization.py:251, in _open_file.init(self, name, mode)
250 def init(self, name, mode):
--> 251 super(_open_file, self).init(open(name, mode))FileNotFoundError: [Errno 2] No such file or directory: '/var/folders/jn/khnrf1p13v3g_hq6lnq_gjg80000gn/T/best_model_1.pth'
_
I think that this is another GoogleDriveDownloader error because, in core.py lines 77-81, there is a file called "model.zip" downloaded using GoogleDriveDownloader with the File-ID "1zShYcG8IMsMjB8hA6FcBTIZPfi_wDL4n". That downloads a 0.0B File into the temporary directory again, because of an internal error in the GoogleDriveDownloader module. The missing file "best_model_1.pth" is presumably inside the "model.zip" file, which can't be unzipped because of the flawed download.
Could someone provide the link to these files/their Google Drive as a workaround for that issue?
Thanks in advance!
I performed the tfm analysis with ROIs. But the output in the h5 file under traction_map still lists all measured forces.
I would like to only get the forces from the cell, meaning inside the ROI, so I wrote some code in R to do this. It works, but is a bit tricky since the coordinates of the mask_roi are related to the image pixels and the coordinates of the traction map are something else (related to window size?). It basically requires some rescaling, which can't be done with a constant as far as I can see.
So I was wondering if there is a native way inside pytraction or the h5 output file to filter out forces outside the ROI, that I might have missed.
Need to add test and sample data to Google Drive and pull down on .sh call
Currently, TractionForce.process_stack() takes a while to run on the example files in usage.ipynb since they are made of more than 10 images per stack. For demonstration purposes 3-5 images per stack would be plenty.
Creating a traction_object
traction_obj2 = TractionForce(pix_per_um, E=E, segment=True, window_size=64)
And running process_stack as
log2 = traction_obj2.process_stack(img, ref, verbose=1)
Gives this error:
RuntimeError: Sizes of tensors must match except in dimension 1. Got 98 and 99 in dimension 3 (The offending index is 1)
Which is very weird because these are the dimensions:
img.shape, ref.shape
((10, 2, 1088, 1590), (2, 1088, 1590))
This issue doesn't happen when using
traction_obj2 = TractionForce(pix_per_um, E=E, segment=False, window_size=64)
If I try to process my full uncropped 2048x2048 images I get a memory error saying I ran out of RAM. Cropped images do work though.
('Algorithm : ', 'WiDIM')
('ITERATION # ', 0)
..[DONE]
(' --residual : ', 0.9999999812503433)
Starting validation..
('Validation, iteration number ', 0)
('Validation, iteration number ', 1)
('Validation, iteration number ', 2)
..[DONE]
MemoryError Traceback (most recent call last)
/tmp/ipykernel_350/2562665077.py in
41 # p.map(tf,range(1,17))
42
---> 43 log1 = tf(0)
44 print("--- %s seconds ---" % (time.time() - start_time))
45
/tmp/ipykernel_350/2562665077.py in tf(i)
27 img, ref, roi = traction_config.load_data(img_path, ref_path, roi_path) # we can load the dat using the load_data method
28
---> 29 log1 = process_stack(img, ref, traction_config, roi = roi, verbose=0)
30 log1.save("out/pos_"+ j + "_.h5")
31
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/core.py in process_stack(img_stack, ref_stack, config, bead_channel, cell_channel, roi, frame, crop, verbose, custom_noise)
313
314 # compute traction map
--> 315 traction_map, f_n_m, L_optimal = calculate_traction_map(pos, vec, beta,
316 config.config['tfm']['meshsize'],
317 config.config['tfm']['s'],
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/process.py in calculate_traction_map(pos, vec, beta, meshsize, s, pix_per_mu, E)
48
49 # get lambda from baysian bad boi
---> 50 L, evidencep, evidence_one = optimal_lambda(beta, fuu, Ftux, Ftuy, 1, s, meshsize, i_max, j_max, X, 1)
51
52 # do the TFM with bays lambda
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/optimal_lambda.py in optimal_lambda(beta, fuu, Ftux, Ftuy, E, s, cluster_size, i_max, j_max, X, sequence)
54
55 target = partial(minus_logevidence, beta=beta, C_a=C_a, BX_a=BX_a, X=X, fuu=fuu, constant=constant, Ftux=Ftux,Ftuy=Ftuy,E=E,s=s,cluster_size=cluster_size,i_max=i_max, j_max=j_max)
---> 56 alpha_opt = optimize.fminbound(target, alpha1, alpha2, disp=3)
57
58 evidence_one = -target(alpha_opt)
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/scipy/optimize/optimize.py in fminbound(func, x1, x2, args, xtol, maxfun, full_output, disp)
1907 'disp': disp}
1908
-> 1909 res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
1910 if full_output:
1911 return res['x'], res['fun'], res['status'], res['nfev']
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/scipy/optimize/optimize.py in _minimize_scalar_bounded(func, bounds, args, xatol, maxiter, disp, **unknown_options)
1956 rat = e = 0.0
1957 x = xf
-> 1958 fx = func(x, *args)
1959 num = 1
1960 fmin_data = (1, xf, fx)
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/pytraction/optimal_lambda.py in minus_logevidence(alpha, beta, C_a, BX_a, X, fuu, constant, Ftux, Ftuy, E, s, cluster_size, i_max, j_max)
22 A = alpha*csr_matrix(C_a) + BX_a
23
---> 24 L = sparse_cholesky(csr_matrix(A)).toarray()
25 logdetA = 2*np.sum(np.log(np.diag(L)))
26
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/scipy/sparse/compressed.py in toarray(self, order, out)
1029 if out is None and order is None:
1030 order = self._swap('cf')[0]
-> 1031 out = self._process_toarray_args(order, out)
1032 if not (out.flags.c_contiguous or out.flags.f_contiguous):
1033 raise ValueError('Output array must be C or F contiguous')
~/miniconda3/envs/pytraction/lib/python3.8/site-packages/scipy/sparse/base.py in _process_toarray_args(self, order, out)
1200 return out
1201 else:
-> 1202 return np.zeros(self.shape, dtype=self.dtype, order=order)
1203
1204
MemoryError: Unable to allocate 48.6 GiB for an array with shape (80800, 80800) and data type float64
When taking images of the bead channel, one time some of the settings got mixed up which led to images of rather bad quality. They had bad contrast/dynamic range and were rather blurry. Nevertheless I was able to generate reasonable traction force maps using Fiji, while pytraction seemed to struggle in these cases.
For the Fiji pipeline I use rolling ball background subtraction after stack alignment which I think might have helped.
Here is the link to some example images and the outputs I got. For each image it contains following files:
filename | description | comment |
---|---|---|
Magnitude map ... | magnitude map generated with FiJi pipeline | |
pos_... | cropped images and roi used for pytraction pipeline; pdf file of traction force map generated by pytraction |
wc = with cell; woc = without cell |
..._RAW | raw images | ch00 = phase contrast; ch01 = beads |
Currently, the log is displayed if verbose is set to 0 and omitted if set to 1. One would intuitively expect the opposite.
Wolfram will have a play with simulations over the next week. To generate either a ground truth PIV or displacement field
I just realized that I pushed with non default configs!
Please make sure that crop_aligned_slice
is set to True
settings:
bead_channel: 0
cell_channel: 1
segment: False
device: cpu
crop_aligned_slice: True
Originally posted by @rg314 in #52 (comment)
As suggested by @andimi, each example should be separated into individual notebooks. It could also be wise to put them into Colab notebooks rather than Jupyter notebooks. What do you think?
Executing the get_example_data.py script in the pytraction/scripts path produces following error:
Downloading 1DsPuqAzI7CEH-0QN-DWHdnF6-to5HdFe into data/data.zip...
0.0 B Done.
Unzipping.../Users/User/opt/anaconda3/envs/pytraction/lib/python3.8/site-packages/google_drive_downloader/google_drive_downloader.py:78: UserWarning: Ignoring unzip
since "1DsPuqAzI7CEH-0QN-DWHdnF6-to5HdFe" does not look like a valid zip file
warnings.warn('Ignoring unzip
since "{}" does not look like a valid zip file'.format(file_id))
There could be an error with the File-ID of the file the script is trying to download. This would explain why the size of the file is 0.0 B. I have no other idea what could cause this error.
While running, some warnings appear that might alarm users. These should be fixed to prevent confusion.
/Users/ryan/miniconda3/envs/pytraction/lib/python3.8/site-packages/scipy/optimize/optimize.py:2015: ComplexWarning: Casting complex values to real discards the imaginary part
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
/Users/ryan/miniconda3/envs/pytraction/lib/python3.8/site-packages/scipy/sparse/linalg/dsolve/linsolve.py:318: SparseEfficiencyWarning: splu requires CSC matrix format
Currently, the output of the core module is stored in a pandas DataFrame where each row (frame) contains multiple instances of image data. Saving this output to a .csv or pickle is expensive and can take a while to read back into ram. Also, the processing of large datasets might make it difficult to read into ram. For that reason, the storage requirements should be outlined
Unexpected behaviour
Error when trying to install pytraction via pip install -e .
in local docker container:
ERROR: Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
How to reproduce
docker-compose up -d --build
pip install -e .
This version removes the obsolete widim.pyx and related tests and notebooks. We found this algorithm in Cython to be obsolete due to the newer and debugged windef.py
https://nbviewer.jupyter.org/github/OpenPIV/openpiv-python/blob/master/openpiv/examples/notebooks/window_deformation_comparison.ipynb
Check if the Pere Roca-Cusachs TFM package is online / contact
The current implementation of scipy.optimize.fminbound
is annoyingly slow (10-20s)
traction_config = TractionForceConfig(E=E, scaling_factor=pix_per_um, config=config)
img, ref, roi = traction_config.load_data(img_path, ref_path)
leads to an error when scikit-learn is v1.0 or above.
It works with scikit-learn v0.24.2, I suggest pinning the version for now
this line of code (line 71 of pipeline_cnn.py as of commit 6f1968a):
best_model = torch.hub.load_state_dict_from_url('https://docs.google.com/uc?export=download&id=13CLZoNyvCt2K46UvAyHUqH7099FbnBh_')
errors on _cuda_deserialize.
Assuming that this model specifically is cuda-based and therefore cannot be deserialised for cpu computation.
Should the model be ensured to work across both CUDA and CPU compute?
Same issue from @Phlair raised in autoballs
There are no requirements file and dependencies which are causing the initial build to fail.
In some traction force maps I noticed small spots of extremely high forces that were not associated with a cell or other structure but did sometimes correspond to bead movement.
Here is the link to some example images and the outputs I got. For each image it contains following files:
filename | description | comment |
---|---|---|
Magnitude map ... | magnitude map generated with FiJi pipeline | |
pos_... | cropped images and roi used for pytraction pipeline; pdf file of traction force map generated by pytraction |
wc = with cell; woc = without cell |
..._RAW | raw images | ch00 = phase contrast; ch01 = beads |
Andrea and Aki to start testing the script and contacting former members. To check data.
Currently, the input for an ROI is a single boundary. However, the user might have multiple x,y boundaries or want to input the boundary as a tuple. The get ROI function should recursively get the ROIs. There should also be a setting to set the ROI as the noisy region.
~\miniconda3\envs\pytraction\lib\site-packages\pytraction\fourier.py in fourier_xu(pos, vec, meshsize, E, s, grid_mat)
16 kx_vec = 2*np.pi/i_max/meshsize*np.concatenate([np.arange(0,(i_max-1)/2), -np.arange(i_max/2,0, -1)])
17 kx_vec = np.expand_dims(kx_vec, axis=0)
---> 18 ky_vec = 2*np.pi/j_max/meshsize*np.concatenate([np.arange(0,(j_max-1)/2), -np.arange(j_max/2,0, -1)])
19 ky_vec = np.expand_dims(ky_vec, axis=0)
20
ZeroDivisionError: float division by zero
Add a separate Contributing.md file to guidance on PRs and issues.
Samples that undergo large drift cause the PIV measurements to show artifacts at the edges. After the drift correction using align_slice()
the drift is corrected but the PIV thinks this is a displacement.
The whole system (in general) works end to end with the exception of a few bugs. However, sometimes a window size needs to be defined using the following function. This is largely determined by the bead density in the image. The following function needs to take in a bead reference and determine the most appropriate window size as a power of 2.
Line 38 in a4821f6
Use segmentation models to automatically segment cells. This should be done using an EfficientNet-B0 backbone so it is lightweight and can be easily run on CPU.
Please see quick and simple version in autoballs
shell script causing issues on installing.
@Phlair python version?
I tried using a Path object from pathlib to automate a folder analysis, and it seems that load_data() doesn't work with Path objects (at least on Windows). Code:
pix_per_um = 1.0 / 0.1018971
E = 3000
p = Path('../data/')
dir = p / 'cells' / '1kPa'
refs = set(dir.glob('*_ref.tif'))
imgs = set(dir.glob('*.tif')) - refs
dataset = list(zip(sorted(imgs), sorted(refs)))
traction_obj = TractionForce(pix_per_um, E=E)
for img_path, ref_path in dataset:
img, ref, _ = traction_obj.load_data(img_path, ref_path)
Raises:
RuntimeWarning: Please ensure that the input image has shape (f,c,w,h) the current shape is (1088, 1590)
This is (at least temporarily) solved by changing the last line to:
img, ref, _ = traction_obj.load_data(str(img_path), str(ref_path))
TFM tools can be annoying for people without programming experience. @andimi mentioned that he might be interested in building a web UI for the analysis of data.
After a quick chat with Aki this morning he mentioned this could be particularly powerful and useful for bio-focused labs.
Running line:
log2 = traction_obj.process_stack(img, ref, roi=roi, verbose=1)
in the usage.ipynb notebook on Windows10 (after correcting for the shapely issue #29) gives this error
For non-square images the following exception is raised:
ValueError: operands could not be broadcast together with shapes (56,28) (28,56)
This is caused by the following lines
#pytraction\reg_fourier.py
Ftfx = Ginv_xx*Ftux + Ginv_xy*Ftuy
Ftfy = Ginv_xy*Ftux + Ginv_yy*Ftuy
It can be solved by
#pytraction\fourier.py
# Fourier transform displacement field
ftux = np.fft.fft2(u[:,:,0]).T
ftuy = np.fft.fft2(u[:,:,1]).T
However, the final traction map needs to be transposed again.
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-1-7851ea25c172> in <module>
3 import matplotlib.pyplot as plt
4
----> 5 from pytraction.core import TractionForce
6 from pytraction.utils import plot
~\miniconda3\envs\pytraction\lib\site-packages\pytraction\core.py in <module>
8 from collections import defaultdict
9 from read_roi import read_roi_file
---> 10 from shapely import geometry
11 import pickle
12 import zipfile
~\miniconda3\envs\pytraction\lib\site-packages\shapely\geometry\__init__.py in <module>
2 """
3
----> 4 from .base import CAP_STYLE, JOIN_STYLE
5 from .geo import box, shape, asShape, mapping
6 from .point import Point, asPoint
~\miniconda3\envs\pytraction\lib\site-packages\shapely\geometry\base.py in <module>
17
18 from shapely.affinity import affine_transform
---> 19 from shapely.coords import CoordinateSequence
20 from shapely.errors import WKBReadingError, WKTReadingError
21 from shapely.geos import WKBWriter, WKTWriter
~\miniconda3\envs\pytraction\lib\site-packages\shapely\coords.py in <module>
6 from ctypes import byref, c_double, c_uint
7
----> 8 from shapely.geos import lgeos
9 from shapely.topology import Validating
10
~\miniconda3\envs\pytraction\lib\site-packages\shapely\geos.py in <module>
152 if os.getenv('CONDA_PREFIX', ''):
153 # conda package.
--> 154 _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))
155 else:
156 try:
~\miniconda3\envs\pytraction\lib\ctypes\__init__.py in __init__(self, name, mode, handle, use_errno, use_last_error, winmode)
379
380 if handle is None:
--> 381 self._handle = _dlopen(self._name, mode)
382 else:
383 self._handle = handle
FileNotFoundError: Could not find module 'C:\Users\ryan\miniconda3\envs\pytraction\Library\bin\geos_c.dll' (or one of its dependencies). Try using the full path with constructor syntax.
Hi @Phlair,
We will potentially be writing a Java plugin to access the python code. Thought this might be of interest to you :)
Let's discuss further in person?
A declarative, efficient, and flexible JavaScript library for building user interfaces.
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
An Open Source Machine Learning Framework for Everyone
The Web framework for perfectionists with deadlines.
A PHP framework for web artisans
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
Some thing interesting about web. New door for the world.
A server is a program made to process requests and deliver data to clients.
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
Some thing interesting about visualization, use data art
Some thing interesting about game, make everyone happy.
We are working to build community through open source technology. NB: members must have two-factor auth.
Open source projects and samples from Microsoft.
Google ❤️ Open Source for everyone.
Alibaba Open Source for everyone
Data-Driven Documents codes.
China tencent open source team.