/* MATLAB Coder output implementing:
 *     v(k+1) = max(vmin(k+1), min(vplus, vmax(k+1)));
 * Coder expands min() and max() into guarded single-pass while loops:
 * the body always sets b_k = 2, so each "loop" runs at most once; the
 * while form is the guard for a possibly-empty range (iv0[1] may be 0).
 */
b_k = 1;
/* First pass: minval_data_idx_0 = min(vplus, vmax(k+1)). */
while (b_k <= iv0[1]) {
h = vplus_data[0];
u1 = vmax->data[(int)((1.0 + (double)k) + 1.0) - 1];
/* Keep h when u1 is NaN — matches MATLAB min(), which ignores NaN. */
if ((h <= u1) || rtIsNaN(u1)) {
minval_data_idx_0 = h;
} else {
minval_data_idx_0 = u1;
}
b_k = 2;
}
b_k = 1;
/* Second pass: vplus_data[0] = max(vmin(k+1), minval_data_idx_0). */
while (b_k <= iv0[1]) {
h = vmin->data[(int)((1.0 + (double)k) + 1.0) - 1];
/* Keep h when the other operand is NaN — matches MATLAB max(). */
if ((h >= minval_data_idx_0) || rtIsNaN(minval_data_idx_0)) {
} else {
h = minval_data_idx_0;
}
vplus_data[0] = h;
b_k = 2;
}
This generated code is equivalent to using min/max to pick between h and u1.
Can anyone tell me why MATLAB generates syntax like this? Why the while loops, even though I don't see anything inside the loop bodies that changes between iterations?
matlab code
v(k+1) = max(vmin(k+1), min(vplus, vmax(k+1)));
notice there are two loops for max min function
I can't explain why the generated code ends up like that, but it must have something to do with how you wrote your Matlab code. It looks strange, but if it works then it probably doesn't matter.
If you're curious about the generator, start from something very simple and watch how the generated code changes as your code gets more complex. Try variations like these:
z = min(x, y);
z = max(w, min(x, y));
for i = 1:length(v)
z(i) = max(w, min(v(i), y));
end
Keep on modifying the test code a bit at a time to make it like the code that prompted this question and maybe you'll discover exactly what triggers the result you're seeing.
Related
So I'm doing a simple oscilloscope in C. It reads audio data from the output buffer (and drops buffer write counter when called so the buffer is refreshed). I tried making simple zero-cross triggering since most of the time users will see simple (sine, pulse, saw, triangle) waves but the best result I got with the code below is a wave that jumps back and forth for half of its cycle. What is wrong?
Signal that is fed in goes from -32768 to 32767 so zero is where it should be.
If you didn't understand what I meant you can see the video: click
Upd: Removed the code unrelated to triggering so all function may be understood easier.
extern Mused mused;
/*
 * Draw the oscilloscope trace into `area` using a zero-crossing trigger:
 * scan the output buffer for the first sample that goes negative right
 * after a positive one, then draw one screen-width of samples from there
 * so the displayed waveform holds still.
 *
 * NOTE(review): gfx_line() is called with the global `domain`, while the
 * `dest` parameter is never used — confirm whether `dest` was intended.
 * NOTE(review): the inner loop reads output_buffer[2 * x] with x up to
 * area->w + i - 1; verify the buffer is large enough for that reach.
 */
void update_oscillscope_view(GfxDomain *dest, const SDL_Rect* area)
{
    /* Restart the buffer fill once enough audio has been accumulated. */
    if (mused.output_buffer_counter >= OSC_SIZE * 12) {
        mused.output_buffer_counter = 0;
    }

    /* Blank the oscilloscope area (every other scanline) so the bevel
       underneath stays hidden. */
    for (int x = 0; x < area->h * 0.5; x++) {
        gfx_line(domain,
            area->x, area->y + 2 * x,
            area->x + area->w - 1, area->y + 2 * x,
            colors[COLOR_WAVETABLE_BACKGROUND]);
    }

    Sint32 sample, last_sample;
    /* BUG FIX: scaled_sample was read (via last_sample) before it was ever
       written on the first line segment — uninitialized read, UB. */
    Sint32 scaled_sample = 0;

    /* BUG FIX: start at 1 — the original started at 0 and read
       output_buffer[i - 1], an out-of-bounds access. */
    for (int i = 1; i < 2048; i++) {
        /* Trigger on a positive-to-negative zero crossing. */
        if (mused.output_buffer[i] < 0 && mused.output_buffer[i - 1] > 0) {
            if (i < OSC_SIZE * 2) {
                for (int x = i; x < area->w + i; ++x) {
                    last_sample = scaled_sample;
                    /* Average the two interleaved (stereo) samples. */
                    sample = (mused.output_buffer[2 * x] + mused.output_buffer[2 * x + 1]) / 2;
                    if (sample > OSC_MAX_CLAMP) { sample = OSC_MAX_CLAMP; }
                    if (sample < -OSC_MAX_CLAMP) { sample = -OSC_MAX_CLAMP; }
                    /* Defensive clamp; last_sample already went through
                       scaling below, so this normally has no effect. */
                    if (last_sample > OSC_MAX_CLAMP) { last_sample = OSC_MAX_CLAMP; }
                    if (last_sample < -OSC_MAX_CLAMP) { last_sample = -OSC_MAX_CLAMP; }
                    scaled_sample = (sample * OSC_SIZE) / 32768;
                    /* Need two points before we can draw a segment. */
                    if (x != i) {
                        gfx_line(domain,
                            area->x + x - i - 1, area->h / 2 + area->y + last_sample,
                            area->x + x - i, area->h / 2 + area->y + scaled_sample,
                            colors[COLOR_WAVETABLE_SAMPLE]);
                    }
                }
            }
            return;
        }
    }
}
During debugging, I simplified the code until it started working. Thanks Clifford.
I found a trigger index i (let's say it is array index 300). Modified it so that the oscilloscope was drawing lines from [(2 * i) + offset] to [(2 * i + 1) + offset], thus an incorrect picture was formed.
I used (2 * i), because I wanted long waves to fit into oscilloscope. I replaced it with drawing from [i + offset] to [i + 1 + offset] and that solved a problem.
Afterwards, I implemented "horizontal scale 0.5x" properly.
The output waveform still jumps a little, but overall it holds it in place.
This is the function I have written for 2D Convolution in C:
/* Grayscale image: w x h samples stored row-major in `data`
   (sample (X, Y) lives at data[Y * w + X], as indexed by Convolution2D). */
typedef struct PGMImage{
int w;     /* width in samples */
int h;     /* height in samples */
int* data; /* row-major sample buffer of length w * h */
}GrayImage;
/*
 * 2D convolution of `image` with `kernel`, 'same' output size, zero padding
 * at the borders. The kernel is flipped in both axes (yy/xx below), so this
 * is true convolution — matching MATLAB's conv2 — not correlation.
 *
 * Returns a newly allocated GrayImage with the same dimensions as `image`;
 * the caller owns conv.data. If the kernel is larger than the image, the
 * error is reported and the output image is returned without the
 * convolution being computed (its pixel contents are whatever
 * CreateGrayImage left there — TODO confirm it zeroes the buffer).
 *
 * NOTE(review): `temp` is an int accumulator; large kernels or large sample
 * values could overflow it — widen if your inputs demand.
 */
GrayImage Convolution2D(GrayImage image, GrayImage kernel){
    GrayImage conv;
    CreateGrayImage(&conv, image.w, image.h);

    const int aH = image.h;
    const int aW = image.w;
    const int bH = kernel.h;
    const int bW = kernel.w;

    if (aW < bW || aH < bH) {
        fprintf(stderr, "Image cannot have smaller dimensions than the blur kernel");
        /* BUG FIX: the original only printed the message and then went on
           to compute with the invalid sizes anyway. */
        return conv;
    }

    for (int r = aH - 1; r >= 0; r--) {
        for (int c = aW - 1; c >= 0; c--) {
            int temp = 0;
            for (int y = bH - 1; y >= 0; y--) {
                const int yy = bH - y - 1; /* flipped kernel row */
                for (int x = bW - 1; x >= 0; x--) {
                    const int xx = bW - x - 1; /* flipped kernel column */
                    const int X = c + (x - (bW / 2));
                    const int Y = r + (y - (bH / 2));
                    /* Zero padding: taps outside the image contribute 0. */
                    if (X >= 0 && X < aW && Y >= 0 && Y < aH) {
                        temp += kernel.data[(yy * bW) + xx] * image.data[(Y * aW) + X];
                    }
                }
            }
            conv.data[(r * aW) + c] = temp;
        }
    }
    return conv;
}
I reproduced this function in Matlab and found that it overestimates the values for certain pixels as compared to the regular 2D Convolution function in Matlab (conv2D). I can't figure out where I am going wrong with the logic. Please help.
EDIT:
Here's the stock image I am using (512*512):
https://drive.google.com/file/d/0B3qeTSY-DQRvdWxCZWw5RExiSjQ/view?usp=sharing
Here's the kernel (3*3):
https://drive.google.com/file/d/0B3qeTSY-DQRvdlQzamcyVmtLVW8/view?usp=sharing
On using the above function I get
46465 46456 46564
45891 46137 46158
45781 46149 46030
But Matlab's conv2 gives me
46596 46618 46627
46073 46400 46149
45951 46226 46153
for the same pixels (rows 239-241, columns 316-318)
This is the Matlab code I am using to compare the values:
pgm_img = imread('path\to\lena512.pgm');
kernel = imread('path\to\test_kernel.pgm');
sz_img = size(pgm_img);
sz_ker = size(kernel);
conv = conv2(double(pgm_img),double(kernel),'same');
pgm_img = padarray(pgm_img,floor(0.5*sz_ker),'both');
convolve = zeros(sz_img);
for i=floor(0.5*sz_ker(1))+1:floor(0.5*sz_ker(1))+sz_img(1)
for j=floor(0.5*sz_ker(2))+1:floor(0.5*sz_ker(2))+sz_img(2)
startX = j - floor(sz_ker(2)/2);
startY = i - floor(sz_ker(1)/2);
endX = j + floor(sz_ker(2)/2);
endY = i + floor(sz_ker(1)/2);
block = pgm_img(startY:endY,startX:endX);
prod = double(block).*double(kernel);
convolve(i-floor(0.5*sz_ker(1)),j-floor(0.5*sz_ker(2))) = sum(sum(prod));
end
end
disp(conv(239:241,316:318));
disp(convolve(239:241,316:318));
One obvious difference is that your c code uses ints, while the matlab code uses doubles. Change your c code to use doubles, and see if the results are still different.
I created Image Convolution library for simple cases of an image which is a simple 2D Float Array.
The function supports arbitrary kernels and verified against MATLAB's implementation.
So all needed on your side is calling it with your generated Kernel.
You can use its generated DLL inside MATLAB and see it yields same results as MATLAB's Image Convolution functions.
Image Convolution - GitHub.
As I mentioned in the instructions, I need to write this function without using any if statements other than for validity checks/variable guard checks. But I'm having trouble determining whether or not my code fits this specification. And if my code doesn't work, how would I write it without using the prohibited if statements?
Here's the problem:
Here's my code:
/*
 * badgePoly - decide whether field (x, y) earns the "poly" badge.
 *
 * Returns 1 if the field earns at least two of the five bronze badges,
 * 0 if it earns fewer, and -1 if (x, y) lies outside the valid 1..20
 * board range. The only remaining `if` is the validity guard, which the
 * assignment spec allows.
 */
int badgePoly(int x, int y) {
    int polybadge = -1; /* default: coordinates out of range */

    if ((x >= 1 && x <= 20) && (y >= 1 && y <= 20)) {
        /* Each badge function yields a 0/1 indicator; sum them up. */
        int bronzebadges = badgeBoundaryMaven(x, y)
                         + badgeInnerCircle(x, y)
                         + badgeLocalHero(x, y)
                         + badgeBoringWeather(x, y)
                         + badgeInQuattro(x, y);

        /* A relational expression evaluates to 0 or 1 in C, so the
           two-badge threshold needs no branch. */
        polybadge = (bronzebadges >= 2);
    }

    return polybadge;
}
NOTE: mavenbadge, incirclebadge, herobadge, weatherbadge, and inquattrobadge are all the different types of bronze badges that you can earn in a field (x, y); and bronzebadges is just the sum of all of these to find total number of bronze badges in a field (x, y).
It's simple. Replace this:
if (bronzebadges >= 2) {
polybadge = 1;
} else {
polybadge = 0;
}
With this:
polybadge = (bronzebadges >= 2);
Now, obviously if one of the badgeXXX functions uses if statements you'll need to do more, but I can't help you with that without the code.
Without using the comparison operator:
polybadge = !!(bronzebadges & ~1);
nightcracker's version is more sane though.
A friend of mine needs an analogue of MatLAB's betainc function for some statistical calculations in programmable logic devices (PLD's) (I'm not a man of hardware and don't know any details on his project yet).
Therefore using precompiled libraries is not an option.
My friend needs an implementation in raw C, where each of the three parameters can vary at runtime.
Is there a good one somewhere on the Web?
Thank you so much in advance!
I know I'm late to answering, but your currently accepted answer (using code from "Numerical Recipes") has a terrible license. Also, it doesn't help others that don't already own the book.
Here is raw C99 code for the incomplete beta function released under the Zlib license:
#include <math.h>
#define STOP 1.0e-8
#define TINY 1.0e-30
double incbeta(double a, double b, double x) {
if (x < 0.0 || x > 1.0) return 1.0/0.0;
/*The continued fraction converges nicely for x < (a+1)/(a+b+2)*/
if (x > (a+1.0)/(a+b+2.0)) {
return (1.0-incbeta(b,a,1.0-x)); /*Use the fact that beta is symmetrical.*/
}
/*Find the first part before the continued fraction.*/
const double lbeta_ab = lgamma(a)+lgamma(b)-lgamma(a+b);
const double front = exp(log(x)*a+log(1.0-x)*b-lbeta_ab) / a;
/*Use Lentz's algorithm to evaluate the continued fraction.*/
double f = 1.0, c = 1.0, d = 0.0;
int i, m;
for (i = 0; i <= 200; ++i) {
m = i/2;
double numerator;
if (i == 0) {
numerator = 1.0; /*First numerator is 1.0.*/
} else if (i % 2 == 0) {
numerator = (m*(b-m)*x)/((a+2.0*m-1.0)*(a+2.0*m)); /*Even term.*/
} else {
numerator = -((a+m)*(a+b+m)*x)/((a+2.0*m)*(a+2.0*m+1)); /*Odd term.*/
}
/*Do an iteration of Lentz's algorithm.*/
d = 1.0 + numerator * d;
if (fabs(d) < TINY) d = TINY;
d = 1.0 / d;
c = 1.0 + numerator / c;
if (fabs(c) < TINY) c = TINY;
const double cd = c*d;
f *= cd;
/*Check for stop.*/
if (fabs(1.0-cd) < STOP) {
return front * (f-1.0);
}
}
return 1.0/0.0; /*Needed more loops, did not converge.*/
}
It is taken from this Github repo. There is also a very thorough write-up about how it works here.
Hope you find this helpful.
Or you could read "Numerical Recipes in C" and find complete source. You'll have to worry about licensing issues, but it'll have a lucid explanation of what the function and its implementation are about.
I've been using the FJCore library in a Silverlight project to help with some realtime image processing, and I'm trying to figure out how to get a tad more compression and performance out of the library. Now, as I understand it, the JPEG standard allows you to specify a chroma subsampling ratio (see http://en.wikipedia.org/wiki/Chroma_subsampling and http://en.wikipedia.org/wiki/Jpeg); and it appears that this is supposed to be implemented in the FJCore library using the HsampFactor and VsampFactor arrays:
public static readonly byte[] HsampFactor = { 1, 1, 1 };
public static readonly byte[] VsampFactor = { 1, 1, 1 };
However, I'm having a hard time figuring out how to use them. It looks to me like the current values are supposed to represent 4:4:4 subsampling (e.g., no subsampling at all), and that if I wanted to get 4:1:1 subsampling, the right values would be something like this:
public static readonly byte[] HsampFactor = { 2, 1, 1 };
public static readonly byte[] VsampFactor = { 2, 1, 1 };
At least, that's the way that other similar libraries use these values (for instance, see the example code here for libjpeg).
However, neither the above values of {2, 1, 1} nor any other set of values that I've tried besides {1, 1, 1} produce a legible image. Nor, in looking at the code, does it seem like that's the way it's written. But for the life of me, I can't figure out what the FJCore code is actually trying to do. It seems like it's just using the sample factors to repeat operations that it's already done -- i.e., if I didn't know better, I'd say that it was a bug. But this is a fairly established library, based on some fairly well established Java code, so I'd be surprised if that were the case.
Does anybody have any suggestions for how to use these values to get 4:2:2 or 4:1:1 chroma subsampling?
For what it's worth, here's the relevant code from the JpegEncoder class:
for (comp = 0; comp < _input.Image.ComponentCount; comp++)
{
Width = _input.BlockWidth[comp];
Height = _input.BlockHeight[comp];
inputArray = _input.Image.Raster[comp];
for (i = 0; i < _input.VsampFactor[comp]; i++)
{
for (j = 0; j < _input.HsampFactor[comp]; j++)
{
xblockoffset = j * 8;
yblockoffset = i * 8;
for (a = 0; a < 8; a++)
{
// set Y value. check bounds
int y = ypos + yblockoffset + a; if (y >= _height) break;
for (b = 0; b < 8; b++)
{
int x = xpos + xblockoffset + b; if (x >= _width) break;
dctArray1[a, b] = inputArray[x, y];
}
}
dctArray2 = _dct.FastFDCT(dctArray1);
dctArray3 = _dct.QuantizeBlock(dctArray2, FrameDefaults.QtableNumber[comp]);
_huf.HuffmanBlockEncoder(buffer, dctArray3, lastDCvalue[comp], FrameDefaults.DCtableNumber[comp], FrameDefaults.ACtableNumber[comp]);
lastDCvalue[comp] = dctArray3[0];
}
}
}
And notice that in the i & j loops, they're not controlling any kind of pixel skipping: if HsampFactor[0] is set to two, it's just grabbing two blocks instead of one.
I figured it out. I thought that by setting the sampling factors, you were telling the library to subsample the raster components itself. Turns out that when you set the sampling factors, you're actually telling the library the relative size of the raster components that you're providing. In other words, you need to do the chroma subsampling of the image yourself, before you ever submit it to the FJCore library for compression. Something like this is what it's looking for:
/// <summary>
/// Builds the per-component (Y/Cb/Cr) rasters from the interleaved source
/// buffer, applying chroma subsampling by keeping only the pixels that fall
/// on each component's sampling grid. Each plane is sized width/hSampleFactor
/// by height/vSampleFactor for its component.
/// </summary>
private byte[][,] GetSubsampledRaster()
{
    byte[][,] planes = new byte[3][,];
    planes[Y] = new byte[width / hSampleFactor[Y], height / vSampleFactor[Y]];
    planes[Cb] = new byte[width / hSampleFactor[Cb], height / vSampleFactor[Cb]];
    planes[Cr] = new byte[width / hSampleFactor[Cr], height / vSampleFactor[Cr]];

    int srcPos = 0;
    for (short row = 0; row < height; row++)
    {
        // Destination row for each plane at this source row.
        int yRow = row / vSampleFactor[Y];
        int cbRow = row / vSampleFactor[Cb];
        int crRow = row / vSampleFactor[Cr];
        // Destination columns advance only when a pixel is actually sampled.
        int yCol = 0, cbCol = 0, crCol = 0;

        for (short col = 0; col < width; col++)
        {
            // Convert this pixel to YCbCr in place; after the call r/g/b
            // hold the values stored into the Y/Cb/Cr planes below.
            byte b = RgbaSample[srcPos++];
            byte g = RgbaSample[srcPos++];
            byte r = RgbaSample[srcPos++];
            YCbCr.fromRGB(ref r, ref g, ref b);

            // Keep only pixels that land on the component's sampling grid.
            if (IncludeInSample(Y, col, row))
            {
                planes[Y][yCol++, yRow] = r;
            }
            if (IncludeInSample(Cb, col, row))
            {
                planes[Cb][cbCol++, cbRow] = g;
            }
            if (IncludeInSample(Cr, col, row))
            {
                planes[Cr][crCol++, crRow] = b;
            }

            // Skip the alpha byte of the RGBA quad; YCbCr ignores it.
            srcPos++;
        }
    }
    return planes;
}
/// <summary>
/// True when pixel (x, y) lies on the sampling grid of component
/// <paramref name="slice"/>, i.e. both coordinates are multiples of the
/// component's horizontal and vertical sampling factors.
/// </summary>
static private bool IncludeInSample(int slice, short x, short y)
{
    bool onHorizontalGrid = (x % hSampleFactor[slice]) == 0;
    bool onVerticalGrid = (y % vSampleFactor[slice]) == 0;
    // Hopefully this gets inlined . . .
    return onHorizontalGrid && onVerticalGrid;
}
There might be additional ways to optimize this, but it's working for now.