Edge Detection in C

I'm working on an edge detection function. Looking back at my code, I think I have the concept and logic down, but the results aren't coming out the way they should.
typedef struct {
int Red;
int Green;
int Blue;
} GTOTALS;
// Detect edges
void edges(int height, int width, RGBTRIPLE image[height][width])
{
const int MAX = 3;
// Copy Image
RGBTRIPLE Copy[height][width];
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
Copy[i][j] = image[i][j];
}
}
// Gx and Gy Grids 3 x 3
int Gx[MAX][MAX] = {
{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}
};
int Gy[MAX][MAX] = {
{-1, -2, -1},
{0, 0, 0},
{1, 2, 1}
};
// Loop through each pixel
for (int Rows = 0; Rows < height; Rows++)
{
for (int Cols = 0; Cols < width; Cols++)
{
// Hold RGB Values + Refresh Current Pixel RGB
int CRed = 0, CGreen = 0, CBlue = 0;
// Store Gx and Gy RGB Values
GTOTALS X;
GTOTALS Y;
// Loop through surrounding pixels
for (int S_Rows = Rows - 1, R = 0; S_Rows <= Rows + 1; S_Rows++, R++)
{
for (int S_Cols = Cols - 1, C = 0; S_Cols <= Cols + 1; S_Cols++, C++)
{
// Check Pixel Validity
if ((S_Rows >= 0) && (S_Rows < height) && (S_Cols >= 0) && (S_Cols < width))
{
// RGB Gx Total Values
X.Red += Copy[S_Rows][S_Cols].rgbtRed * Gx[R][C]; // Current Pixel Red * Gx[N][N]
X.Green += Copy[S_Rows][S_Cols].rgbtGreen * Gx[R][C]; // Current Pixel Green * Gx[N][N]
X.Blue += Copy[S_Rows][S_Cols].rgbtBlue * Gx[R][C]; // Current Pixel Blue * Gx[N][N]
// RGB Gy Total Values
Y.Red += Copy[S_Rows][S_Cols].rgbtRed * Gy[R][C]; // Current Pixel Red * Gy[N][N]
Y.Green += Copy[S_Rows][S_Cols].rgbtGreen * Gy[R][C]; // Current Pixel Green * Gy[N][N]
Y.Blue += Copy[S_Rows][S_Cols].rgbtBlue * Gy[R][C]; // Current Pixel Blue * Gy[N][N]
}
}
}
// Value = Square Root(Gx^2 + Gy^2)
CRed = round( sqrt( pow(X.Red, 2.0) + pow(Y.Red, 2.0) ) );
CGreen = round( sqrt( pow(X.Green, 2.0) + pow(Y.Green, 2.0) ) );
CBlue = round( sqrt( pow(X.Blue, 2.0) + pow(Y.Blue, 2.0) ) );
// MAX 255
Cap(&CRed);
Cap(&CGreen);
Cap(&CBlue);
// Update Target Pixel
image[Rows][Cols].rgbtRed = CRed;
image[Rows][Cols].rgbtGreen = CGreen;
image[Rows][Cols].rgbtBlue = CBlue;
}
}
return;
}
void Cap(int *Value)
{
if (*Value > 255)
{
*Value = 255;
}
}
When I run the program, most of the RGB values turn out to be 255. I've played around with using different data types and moving around where variables are created, but that doesn't seem to help. I've also tried miniature versions of the code, and those all seem to work as intended, so I'm not sure why it doesn't give the correct results when I put everything together.
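One observation (my own guess, not something verified against the full program): the GTOTALS accumulators X and Y are declared without initializers, so their Red/Green/Blue fields start with indeterminate values before the += lines run, which would easily push the computed magnitudes past 255. A minimal sketch of the change inside the pixel loop:
// Store Gx and Gy RGB totals, zero-initialized so += accumulates from 0
GTOTALS X = {0, 0, 0};
GTOTALS Y = {0, 0, 0};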

Here is a description of the Sobel filter:
// from Source to Destination
int ComputeBoundaries(unsigned char S[], unsigned char D[])
{
unsigned int iX,iY; /* indices of 2D virtual array (image) = integer coordinate */
unsigned int i; /* index of 1D array */
/* sobel filter */
unsigned char G, Gh, Gv;
// boundaries are in D array ( global var )
// clear D array
memset(D, iColorOfBasin1, iSize*sizeof(*D)); // for heap-allocated arrays, where N is the number of elements = FillArrayWithColor(D , iColorOfBasin1);
// printf(" find boundaries in S array using Sobel filter\n");
#pragma omp parallel for schedule(dynamic) private(i,iY,iX,Gv,Gh,G) shared(iyMax,ixMax)
for(iY=1;iY<iyMax-1;++iY){
for(iX=1;iX<ixMax-1;++iX){
Gv= S[Give_i(iX-1,iY+1)] + 2*S[Give_i(iX,iY+1)] + S[Give_i(iX-1,iY+1)] - S[Give_i(iX-1,iY-1)] - 2*S[Give_i(iX-1,iY)] - S[Give_i(iX+1,iY-1)];
Gh= S[Give_i(iX+1,iY+1)] + 2*S[Give_i(iX+1,iY)] + S[Give_i(iX-1,iY-1)] - S[Give_i(iX+1,iY-1)] - 2*S[Give_i(iX-1,iY)] - S[Give_i(iX-1,iY-1)];
G = sqrt(Gh*Gh + Gv*Gv);
i= Give_i(iX,iY); /* compute index of 1D array from indices of 2D array */
if (G==0) {D[i]=255;} /* background */
else {D[i]=0;} /* boundary */
}
}
return 0;
}
// copy from Source to Destination
int CopyBoundaries(unsigned char S[], unsigned char D[])
{
unsigned int iX,iY; /* indices of 2D virtual array (image) = integer coordinate */
unsigned int i; /* index of 1D array */
//printf("copy boundaries from S array to D array \n");
for(iY=1;iY<iyMax-1;++iY)
for(iX=1;iX<ixMax-1;++iX)
{i= Give_i(iX,iY); if (S[i]==0) D[i]=0;}
return 0;
}
Here is the image and a full program
result:

Related

Passing 3d arrays to a convolution function in C

I need to write a function that performs a 2D convolution, and for that I need to pass it a couple of 3D arrays. However, I've been told my method is not an ideal way to do this.
First, I declare the variables:
typedef struct {
float img[224][224][3];
} input_224_t;
typedef struct {
float img[112][112][32];
} input_112_t;
typedef struct {
float img[3][3][32];
} weightsL1_t;
Then, the convolution looks like this:
void convolution(input_224_t* N, weightsL1_t* M, input_112_t* P, int size, int ksize, int channels, int filters, int stride)
{
// Effectively pads the image before convolution. Technically also works for pointwise, but it's inefficient.
// find center position of kernel (half of kernel size)
int kcenter = ksize / 2;
// Declare output indexes
int a = 0;
int b = -1;
for (int k = 0; k < filters; ++k) // filters
{
for (int i = 0; i < size; i = i + stride) // rows
{
for (int j = 0; j < size; j = j + stride) // columns
{
b++;
if (b == ksize) {b=0;a++;} // Increment output index
for (int m = 0; m < ksize; ++m) // kernel rows
{
for (int n = 0; n < ksize; ++n) // kernel columns
{
// Index of input signal, used for checking boundary
int ii = i + (m - kcenter);
int jj = j + (n - kcenter);
// Ignore input samples which are out of bound
if (ii >= 0 && ii < size && jj >= 0 && jj < size) {
for (int p = 0; p < channels; ++p) // channels
{
P.img[a][b][k] += N.img[ii][jj][p] * M.img[m][n][k]; // convolve
}
}
}
}
}
}
}
}
(This returns "field 'img' could not be resolved" at the "convolve" line)
I then import the values into the correct structs (which was a previous question of mine which has been answered: Write values to a 3D array inside a struct in C) and I call the function like this:
convolution(test_image, test_filter, test_result, 6, 3, 1, 1, 2);
I have been told in my previous question that this is not an ideal way to handle 3D arrays, and that it may use a lot more memory than I intend. This is a very memory-intensive process, and this will run in an embedded system, so optimizing memory allocation is paramount.
My objective, if possible, is to only allocate one of each of these 3D arrays at any point in time so as not to use unnecessary memory, and to do it in a way that the space can be freed at a later point.
Thank you in advance.
You could use Variable Length Arrays as function parameters.
void convolve(int isize, // width/height of input (224)
int osize, // width/height of output (112)
int ksize, // width/height of kernel (3)
int stride, // shift between input pixels, between consecutive outputs
int pad, // offset between (0,0) pixels between input and output
int idepth, int odepth, // number of input and output channels
float idata[isize][isize][idepth],
float odata[osize][osize][odepth],
float kdata[idepth][ksize][ksize][odepth])
{
// iterate over the output
for (int oy = 0; oy < osize; ++oy) {
for (int ox = 0; ox < osize; ++ox) {
for (int od = 0; od < odepth; ++od) {
odata[oy][ox][od] = 0;
for (int ky = 0; ky < ksize; ++ky) {
for (int kx = 0; kx < ksize; ++kx) {
// map position in output and kernel to the input
int iy = stride * oy + ky - pad;
int ix = stride * ox + kx - pad;
// use only valid inputs
if (iy >= 0 && iy < isize && ix >= 0 && ix < isize)
for (int id = 0; id < idepth; ++id)
odata[oy][ox][od] += kdata[id][ky][kx][od] * idata[iy][ix][id];
}}
}}}
}
Typical usage would be:
// allocate input
float (*idata)[224][3] = calloc(224, sizeof *idata);
// fill input using idata[y][x][d] syntax
// allocate kernel
float (*kdata)[3][3][32] = calloc(3, sizeof *kdata);
// fill kernel
// allocate output
float (*odata)[112][32] = calloc(112, sizeof *odata);
convolve(224, 112, 3, // input, output, kernel size
2, // stride
1, // pad input by one pixel what will center the kernel
3, 32, // number of input and output channels
idata, odata, kdata);
// free memory if it is no longer used
free(idata); free(odata); free(kdata);
The multidimensional arrays could be allocated with:
float (*arr)[10][20][30] = malloc(sizeof *arr);
however, accessing elements is a bit cumbersome due to the (*arr)[i][j][k] syntax. Therefore it is simpler to use a pointer to the first element of the array and allocate multiple subarrays at this pointer.
float (*arr)[20][30] = malloc(10 * sizeof *arr);
or with calloc(), which zeroes the memory and guards against size overflow.
float (*arr)[20][30] = calloc(10, sizeof *arr);
BTW, I suggest reordering the kernel dimensions to ODEPTH x KSIZE x KSIZE x IDEPTH. This would make iterating over the kernel more cache-friendly, as shown in the sketch below.
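For illustration, a hedged sketch of the same convolve() loop with the kernel stored in that order (convolve_reordered is a hypothetical name; everything else mirrors the function above):
// Sketch: kernel laid out as [odepth][ksize][ksize][idepth] so the innermost
// loop walks contiguous channels in both the kernel and the input.
void convolve_reordered(int isize, int osize, int ksize, int stride, int pad,
                        int idepth, int odepth,
                        float idata[isize][isize][idepth],
                        float odata[osize][osize][odepth],
                        float kdata[odepth][ksize][ksize][idepth])
{
    for (int oy = 0; oy < osize; ++oy)
    for (int ox = 0; ox < osize; ++ox)
    for (int od = 0; od < odepth; ++od) {
        odata[oy][ox][od] = 0;
        for (int ky = 0; ky < ksize; ++ky)
        for (int kx = 0; kx < ksize; ++kx) {
            // map position in output and kernel to the input
            int iy = stride * oy + ky - pad;
            int ix = stride * ox + kx - pad;
            // use only valid inputs
            if (iy >= 0 && iy < isize && ix >= 0 && ix < isize)
                for (int id = 0; id < idepth; ++id) // contiguous in both arrays
                    odata[oy][ox][od] += kdata[od][ky][kx][id] * idata[iy][ix][id];
        }
    }
}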

Ray tracer not giving different light intensities based on direction

Goal: I am trying to create a ray tracer in C. I just added in a light source that should give each of my three spheres a shading effect based on where the light is. If the light is to the left of all of them, a shadow should be cast on the right.
Problem: When changing the light intensities and position of the light, all the spheres are changed uniformly. The spheres will be more or less lit equally and there is no variation of lighting on individual pixels on the sphere.
My debugging attempts: I have tried looking through the variable outputs by printing a lot of different info, and I think the source of the problem is my variable
diffuse_light_intensity
which does not change much (across all the iterations over the screen, the value only changes twice, when it should change often because the angle between the light and the surface varies considerably).
My Code: (my theory is the problem lies in scene_intersect() or cast_ray())
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <limits.h>
typedef struct {
float position[3];
float intensity;
} Light;
typedef struct {
float diffuse_color[3];
} Material;
typedef struct {
float center[3];
float radius;
Material material;
} Sphere;
int arrSub(const float arr1[], const float arr2[], float subArr[], int length) {
/*
Requires 3 equally sized arrays (denoted as length),
arr1 - arr2 will result in the third array subArr
*/
for (int i = 0; i < length; i++) {
subArr[i] = arr1[i] - arr2[i];
}
return 0;
}
int arrAdd(const float arr1[], const float arr2[], float addArr[], int length) {
/*
Requires 3 equally sized arrays (denoted as length),
arr1 + arr2 will result in the third array subArr
*/
for (int i = 0; i < length; i++) {
addArr[i] = arr1[i] + arr2[i];
}
return 0;
}
int arrScalarMult(const float arr1[], float scalar, float newArr[], int length) {
/*
Requires 3 equally sized arrays (denoted as length),
arr1 - arr2 will result in the third array subArr
*/
for (int i = 0; i < length; i++) {
newArr[i] = arr1[i] * scalar;
}
return 0;
}
float dotProduct(const float arr1[], const float arr2[], int length) {
/*
Returns the dot product of two equal sized arrays
(treated as vectors)
a (dot) b = a1b1 + a2b2 + ... anbn
*/
float result = 0;
for (int i = 0; i < length; i++) {
result += arr1[i] * arr2[i];
}
return result;
}
int normalize(float arr[], int len) {
//Normalize a vector (array)
float sumSqr;
float norm;
for (int i = 0; i < len; i++) {
sumSqr += arr[i] * arr[i];
}
norm = sqrt(sumSqr);
for (int i = 0; i < len; i++) {
arr[i] = arr[i] / norm;
}
return 0;
}
bool ray_intersect(const float origin[], const float dir[], float t0, Sphere s) {
/*
Ray-Sphere Intersection
Vectors:
origin (the zero vector)
dir (direction vector)
L (vector from origin to center of sphere)
Scalars:
tca
d2
thc
t0
t1
*/
float L[3] = {0,0,0}; //The zero vector
arrSub(s.center, origin, L, 3); //L is now the vector from origin to the sphere's center
float tca = dotProduct(L, dir, 3); //Projection of L onto dir
float d2 = dotProduct(L, L, 3) - tca*tca;
if (d2 > s.radius * s.radius) return false; //There is no intersection, so return false.
float thc = sqrtf((s.radius*s.radius - d2));
t0 = tca - thc;
float t1 = tca + thc;
if (t0 < 0) {
t0 = t1;
}
if (t0 < 0) return false;
return true;
}
bool scene_intersect(const float origin[], const float dir[], const Sphere s[], int len, float hit[], float N[], Material * ptr_m) {
float sphere_dist = INT_MAX;
for (size_t i=0; i < len; i++) {
float dist_i;
if (ray_intersect(origin, dir, dist_i, s[i]) && dist_i < sphere_dist) {
sphere_dist = dist_i;
float dirDist[3];
arrScalarMult(dir, dist_i, dirDist, 3);
arrAdd(origin, dirDist, hit, 3);
float hitMinusCenter[3];
arrSub(hit, s[i].center, hitMinusCenter, 3);
normalize(hitMinusCenter, 3);
N[0] = hitMinusCenter[0];
N[1] = hitMinusCenter[1];
N[2] = hitMinusCenter[2];
* ptr_m = s[i].material;
}
}
return sphere_dist<1000;
}
int cast_ray(const float origin[], const float dir[], const Sphere s[], const Light l[], int l_size, unsigned char colorArr[]) {
float point[3], N[3];
Material m;
Material * ptr_m = &m;
if (!scene_intersect(origin, dir, s, 3, point, N, ptr_m)) {
//background
colorArr[0] = 5; //red
colorArr[1] = 100; //green
colorArr[2] = 250; //blue
} else {
float diffuse_light_intensity = 0;
float light_dir[3];
for (size_t i = 0; i < l_size; i++) {
arrSub(l[i].position, point, light_dir, 3);
normalize(light_dir, 3);
diffuse_light_intensity += l[i].intensity * ((0.f >= dotProduct(light_dir, N, 3) ? (0.f) : (dotProduct(light_dir, N, 3))));
}
//light up pixel
colorArr[0] = m.diffuse_color[0] * diffuse_light_intensity;
colorArr[1] = m.diffuse_color[1] * diffuse_light_intensity;
colorArr[2] = m.diffuse_color[2] * diffuse_light_intensity;
}
return 0;
}
int render(const Sphere s[], const Light l[], int l_length) {
/*
Creates image in a new color each step.
*/
const int width = 1024;
const int height = 768;
FILE *fp = fopen("fourth.ppm", "wb"); // Write in binary mode
(void) fprintf(fp, "P6\n%d %d\n255\n", width, height);
float fov = 3.1415926535/2.; // Field of View
#pragma omp parallel for
for (size_t j = 0; j < height; j++) {
for (size_t i = 0; i < width; i++) {
float x = (2*(i+.5)/(float)width - 1)*tan(fov/2.)*width/(float)height;
float y = -(2*(j+.5)/(float)height - 1)*tan(fov/2.);
float dir[] = {x,y,-1};
normalize(dir, 3);
unsigned char color[3];
const float origin[] = {0,0,0};
cast_ray(origin, dir, s, l, l_length, color);
(void) fwrite(color, 1, 3, fp);
}
}
(void) fclose(fp);
return 0;
}
int main(void) {
Material red = {255,0,0};
Material pink = {150,10,150};
Material gold = {255, 195, 0};
//Populate with spheres
Sphere s[3];
Sphere originalS = {{-3,0,-16},2,gold};
Sphere bigS = {{-1.0, -1.5, -12}, 3, red};
Sphere anotherS = {{7,5,-18},2,pink};
s[0] = originalS;
s[1] = bigS;
s[2] = anotherS;
//Add light source
Light l[1];
Light test_light = {{-20,20,20}, 1.5};
l[0] = test_light;
render(s,l, 1);
printf("Run success!\n");
return 0;
}
If any clarification is needed on my code, please let me know; I am quite new to both C and Stack Overflow.
There's a fundamental error in ray_intersect where you're passing the t0 variable by value, and not as a pointer, so dist_i in the scene_intersect function is never updated by the call and keeps its uninitialized value.
The other problem is that you don't initialize sumSqr in the normalize function, resulting in that function producing NaN for each vector component.
With those two fixed I get something approximating shaded balls. The errors in that image are caused by failing to ensure that your output pixel values fall in the range [0, 255].
NB: both of these errors are caught if you turn on full compiler warnings, which will flag uninitialised variables being used.
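A minimal sketch of the fixes described above, keeping the question's helper functions (the clamp at the end is just one way to keep pixel values in [0, 255]):
// t0 is now a pointer, so the caller actually receives the hit distance
bool ray_intersect(const float origin[], const float dir[], float *t0, Sphere s) {
    float L[3] = {0, 0, 0};
    arrSub(s.center, origin, L, 3);          // L = vector from origin to sphere center
    float tca = dotProduct(L, dir, 3);       // projection of L onto dir
    float d2 = dotProduct(L, L, 3) - tca * tca;
    if (d2 > s.radius * s.radius) return false;
    float thc = sqrtf(s.radius * s.radius - d2);
    *t0 = tca - thc;                         // written through the pointer
    float t1 = tca + thc;
    if (*t0 < 0) *t0 = t1;
    if (*t0 < 0) return false;
    return true;
}
// in scene_intersect(), call it with the address of dist_i:
//     if (ray_intersect(origin, dir, &dist_i, s[i]) && dist_i < sphere_dist) { ... }
// in normalize(), start the accumulator at zero:
//     float sumSqr = 0;
// in cast_ray(), clamp before writing to an unsigned char, e.g.:
//     colorArr[0] = (unsigned char) fminf(255.f, m.diffuse_color[0] * diffuse_light_intensity);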

Generating a barcode BMP in c

I've been assigned to create a barcode image from a number I generated randomly (that part I've already done). So far I've tried to create a BMP file and draw simple black and white columns, but my picture comes out distorted by other colors and isn't even arranged in columns. I haven't even started on the barcode itself (that is still a mystery to me); I've been trying for almost two weeks now to no avail. What I mostly need is a way to write black or white pixels column by column or row by row, so I can place black or white at will.
This is my code:
int width, height;
width = 141;
height = 70;
FILE* barcode;
fopen_s(&barcode,NEWBARCODE, "wb");
int filesize = 54 + 3 * width*height; //w is your image width, h is image height, both int
char bmpfileheader[14] = { 'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0 };
char bmpinfoheader[40] = { 0x28,0,0,0, 141,0,0,0, 70,0,0,0, 1,0, 24,0,0,0,0,0,0x8c,0x05,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
unsigned char white[3] = {255,255,255 };
unsigned char black[3] = {0,0,0 };
unsigned char pad[1] = { 0 };
bmpfileheader[2] = (unsigned char)(filesize);
bmpfileheader[3] = (unsigned char)(filesize >> 8);
bmpfileheader[4] = (unsigned char)(filesize >> 16);
bmpfileheader[5] = (unsigned char)(filesize >> 24);
fwrite(&bmpfileheader, 1, 14, barcode);
fwrite(&bmpinfoheader, 1, 40, barcode);
for (int i = 0; i < height; i++) { //columns
for (int j = 0; j < width*3+1; j++) {
if (i % 2 == 0) {
fwrite(&white, 1, 3, barcode);
fwrite(&black, 1, 3, barcode);
}
else {
fwrite(&black, 1, 3, barcode);
fwrite(&white, 1, 3, barcode);
}
}
fwrite(&pad, 1, 1, barcode);
}
and that outputs the bmp file
What is wrong? Any tips on creating a BMP file would be greatly appreciated :)
Try the below. Note that I've moved the BMP to a bit depth of 32. The wrapper function setColumn will allow you to set individual columns to black or white as you see fit. Much more manageable to think of the BMP as an array that you can freely manipulate instead of having to deal with a ton of fwrite logic.
#include "stdafx.h"
#include <fstream>
#define NEWBARCODE "test.bmp"
#define WHITE 255
#define BLACK 0
void setColumn(unsigned char *data, const int height, const int width, const int colIndex, const int grayVal)
{
for (int r = 0; r < height; ++r)
{
data[r * width * 4 + colIndex * 4 + 0] = grayVal;
data[r * width * 4 + colIndex * 4 + 1] = grayVal;
data[r * width * 4 + colIndex * 4 + 2] = grayVal;
data[r * width * 4 + colIndex * 4 + 3] = 255;
}
}
int main()
{
int width, height;
width = 141;
height = 70;
std::ofstream filestream;
filestream.open(NEWBARCODE, std::ios::beg | std::ios::out | std::ios::binary);
int filesize = 54 + 4 * width * height;
char bmpfileheader[14] = { 'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0 };
char bmpinfoheader[40] = { 0x28,0,0,0, 141,0,0,0, 70,0,0,0, 1,0, 32,0,0,0,0,0,0x8c,0x05,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
bmpfileheader[2] = (unsigned char)(filesize);
bmpfileheader[3] = (unsigned char)(filesize >> 8);
bmpfileheader[4] = (unsigned char)(filesize >> 16);
bmpfileheader[5] = (unsigned char)(filesize >> 24);
filestream.write(bmpfileheader, 14);
filestream.write(bmpinfoheader, 40);
//Allocate BMP data block
unsigned char *data = new unsigned char[width * height * 4]{ 0 };
//Initialize BMP data to all black pixels
for (int i = 0; i < width * height * 4; ++i)
data[i] = 0;
//Set white
for (int i = 75; i < 100; ++i)
setColumn(data, height, width, i, WHITE);
//Set white
for (int i = 15; i < 25; ++i)
setColumn(data, height, width, i, WHITE);
//Set black
for (int i = 20; i < 23; ++i)
setColumn(data, height, width, i, BLACK);
filestream.write((const char *)data, height * width * 4);
filestream.close();
delete[] data;
return 0;
}
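Since the question itself is in plain C, here is a hedged sketch of the same buffer-then-write approach using fwrite (the file name, the column ranges, and sticking with a 32-bit pixel layout are illustrative choices, not anything from the original post):
#include <stdio.h>
#include <stdlib.h>

// Set one column of a 32-bpp bottom-up BMP pixel buffer to a gray value.
static void setColumn(unsigned char *data, int height, int width, int col, unsigned char gray)
{
    for (int r = 0; r < height; ++r) {
        unsigned char *px = data + ((size_t)r * width + col) * 4;
        px[0] = px[1] = px[2] = gray;   // B, G, R
        px[3] = 255;                    // unused fourth byte in a 32-bpp BMP
    }
}

int main(void)
{
    const int width = 141, height = 70;
    int filesize = 54 + 4 * width * height;
    unsigned char fileheader[14] = { 'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0 };
    unsigned char infoheader[40] = { 0x28,0,0,0, 141,0,0,0, 70,0,0,0, 1,0, 32,0 };
    fileheader[2] = filesize;       fileheader[3] = filesize >> 8;
    fileheader[4] = filesize >> 16; fileheader[5] = filesize >> 24;

    unsigned char *data = calloc((size_t)width * height, 4);   // all black
    if (!data) return 1;
    for (int i = 75; i < 100; ++i) setColumn(data, height, width, i, 255);  // a white band

    FILE *f = fopen("test.bmp", "wb");
    if (!f) { free(data); return 1; }
    fwrite(fileheader, 1, 14, f);
    fwrite(infoheader, 1, 40, f);
    fwrite(data, 4, (size_t)width * height, f);   // 32-bpp rows need no padding
    fclose(f);
    free(data);
    return 0;
}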

n-th order Bezier Curves?

I've managed to implement quadratic and cubic Bezier curves. They are pretty straightforward since we have a formula. Now I want to represent an n-th order Bezier curve using the generalization:
B(t) = sum for i = 0 to n of ( b(i,n)(t) * P[i] ), for 0 <= t <= 1
where the Bernstein basis polynomials are b(i,n)(t) = C(n, i) * t^i * (1 - t)^(n - i)
and C(n, i) = n! / (i! * (n - i)!) is the binomial coefficient.
I'm using a bitmap library to render the output, so here is my code:
// binomialCoef(n, k) = (factorial(n) / (factorial(k) * factorial(n- k)))
unsigned int binomialCoef(unsigned int n, const unsigned int k)
{
unsigned int r = 1;
if(k > n)
return 0;
for(unsigned int d = 1; d <= k; d++)
{
r *= n--;
r /= d;
}
return r;
}
void nBezierCurve(Bitmap* obj, const Point* p, const unsigned int nbPoint, float steps, const unsigned char red, const unsigned char green, const unsigned char blue)
{
int bx1 = p[0].x;
int by1 = p[0].y;
int bx2;
int by2;
steps = 1 / steps;
for(float i = 0; i < 1; i += steps)
{
bx2 = by2 = 0;
for(int j = 0; (unsigned int)j < nbPoint; j++)
{
bx2 += (int)(binomialCoef(nbPoint, j) * pow(1 - i, (float)nbPoint - j) * pow(i, j) * p[j].x);
by2 += (int)(binomialCoef(nbPoint, j) * pow(1 - i, (float)nbPoint - j) * pow(i, j) * p[j].y);
}
bresenhamLine(obj, bx1, by1, bx2, by2, red, green, blue);
bx1 = bx2;
by1 = by2;
}
// curve must end on the last anchor point
bresenhamLine(obj, bx1, by1, p[nbPoint - 1].x, p[nbPoint - 1].y, red, green, blue);
}
Here's the set of points to render:
Point ncurv[] = {
20, 200,
70, 300,
200, 400,
250, 200
};
and here's the output:
The red curve is a cubic Bezier. The blue one is supposed to be the 4th order Bezier, which is the same as cubic Bezier, but in this case, they are not the same ?!
EDIT :
I forgot to note that the bottom left point is (0, 0)
The sum in your formula runs from 0 to n, i.e. for an n-th order Bezier you need n+1 points.
You have 4 points, so you're drawing a 3rd-order bezier.
The error in your code is here:
for(int j = 0; (unsigned int)j < nbPoint; j++)
it should be:
for(int j = 0; (unsigned int)j <= nbPoint; j++)
otherwise you're only iterating from 0 to n-1.
EDIT:
Out of interest, the shape you were getting is the same as if the missing (5th) point was at (0,0), since that's the only point that would contribute nothing to your sum...
You are trying to construct a 4th-order Bezier curve on only four points. No wonder it's not working.
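For what it's worth, a hedged sketch that reconciles both readings: if nbPoint is the number of control points, the curve's order is n = nbPoint - 1, so the inner loop can sum over all nbPoint points while the binomial coefficients and exponents use n:
// Sketch: treat nbPoint as the point count, so the order is n = nbPoint - 1
// and the sum runs over j = 0 .. n, i.e. all nbPoint points.
unsigned int n = nbPoint - 1;
for (unsigned int j = 0; j <= n; j++)
{
    bx2 += (int)(binomialCoef(n, j) * pow(1 - i, (float)(n - j)) * pow(i, j) * p[j].x);
    by2 += (int)(binomialCoef(n, j) * pow(1 - i, (float)(n - j)) * pow(i, j) * p[j].y);
}
With the four points above this draws the same cubic as the red curve, which is what a four-point Bezier should be.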

Multidimensional array index to row,col,depth values?

I have a few values which are offsets into a multidimensional array, and they look like this:
static const int TILE_SIZE = 32;
int Offset2D = (y * TILE_SIZE) + (x * TILE_SIZE);
int Offset3D = (y * TILE_SIZE) + (x * TILE_SIZE) + (z * TILE_SIZE);
Now what I would like to do is convert an offset back to an x, y, z triple, like so:
void ConvertBack(int offset,int size,int& x,int& y,int& z)
{
//What's wrong with this code ?
x = offset / size;
y = offset % size;
z = ??; //How to get Z?
}
or
//Get back offsets from any dimension ?
void ConvertBackComplex(unsigned int offset,int size,int* vector,int len)
{
for (int i = 0;i < len;i++)
{
vector[i] = offset ?... ?
}
}
So far all of my attempts have failed, so I would really welcome any help!
First of all, I think your indexing system is a bit off: the way you have things arranged, different values of x, y, and z can give the same offset. So, assuming that TILE_SIZE is how many cells of the array store the data for a given point:
myArray = new arr[xSize*ySize*zSize*TILE_SIZE];
int offset2D = (x*ySize*zSize + y*zSize)*TILE_SIZE;
int offset3D = (x*ySize*zSize + y*zSize + z)*TILE_SIZE;
To get x,y,z back from the offset one simply does the following:
temp = offset/TILE_SIZE;
x = temp/(ySize*zSize);
y = (temp%(ySize*zSize))/zSize;
z = (temp%(ySize*zSize))%zSize;
For multiple dimensions:
temp = offset/TILE_SIZE;
sizeProduct = 1;
for(int k=1; k<numDims; ++k)
{
sizeProduct*=sizes[k];
}
for(int i=0; i<numDims; ++i)
{
vector[i]=temp/sizeProduct;
temp = temp % sizeProduct;
if((i+1)<numDims)
{
sizeProduct/=sizes[i+1];
}
}
To calculate array sizes in multiple dimensions:
int arraySize = TILE_SIZE;
for(int i=0; i<numDims; ++i)
{
arraySize*=sizes[i];
}
To calculate array indices in multiple dimensions (assuming vector is your array of coordinates):
int index =0;
sizeProduct = 1;
for(int k=1; k<numDims; ++k)
{
sizeProduct*=sizes[k];
}
for(int i=0; i<numDims; ++i)
{
index+=sizeProduct*vector[i];
if((i+1)<numDims)
{
sizeProduct/=sizes[i+1];
}
}
index*=TILE_SIZE;
Assuming that all dimensions are TILE_SIZE long, your offset calculations are wrong. Let's say I have an array a which simulates a 3D array with all dimensions TILE_SIZE long:
int a[TILE_SIZE * TILE_SIZE * TILE_SIZE];
Then point p with coordinates (x, y, z) would have an offset like this:
int p_offset = z * (TILE_SIZE * TILE_SIZE)
+ y * (TILE_SIZE)
+ x;
Reverse calculation is then:
int p_z = p_offset / (TILE_SIZE * TILE_SIZE);
int p_y = (p_offset - p_z * (TILE_SIZE * TILE_SIZE)) / TILE_SIZE;
int p_x = p_offset % TILE_SIZE;
You can choose different order of dimensions (x, y, z) but you have to be consistent.
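As a quick sanity check, a small sketch that encodes and decodes one point with this layout (the coordinate values are just examples):
#include <stdio.h>
#include <assert.h>

#define TILE_SIZE 32

int main(void)
{
    int x = 5, y = 7, z = 3;
    // encode with z as the highest dimension and x as the lowest
    int p_offset = z * (TILE_SIZE * TILE_SIZE) + y * TILE_SIZE + x;
    // decode in the same order
    int p_z = p_offset / (TILE_SIZE * TILE_SIZE);
    int p_y = (p_offset - p_z * (TILE_SIZE * TILE_SIZE)) / TILE_SIZE;
    int p_x = p_offset % TILE_SIZE;
    assert(p_x == x && p_y == y && p_z == z);   // round trip recovers the point
    printf("offset %d -> (%d, %d, %d)\n", p_offset, p_x, p_y, p_z);
    return 0;
}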
Assuming the dimensions go from X to Y to Z (as in X represents the lowest dimension):
You can't use a single function to calculate both the 2D and 3D offsets back into coordinates.
For 2D:
void ConvertBack2D(int offset, int x_len, int &x, int &y)
{
y = offset / x_len;
x = offset % x_len;
}
For 3D:
void ConvertBack3D(int offset, int x_len, int y_len, int &x, int &y, int &z)
{
z = offset / (x_len * y_len);
y = (offset - (z * x_len * y_len)) / x_len;
x = (offset - (z * x_len * y_len)) % x_len;
}
