I am fairly new to p5.js. I am trying to read a .txt file containing the text below and draw a picture depending on the values in the file.
00000
20022
00222
22020
11111
I am currently stumped as to how to draw an image depending on the number in the array; as an example, '1' would be grass. I have loaded the file as an array of strings using the following code: track = loadStrings("track1.txt");
I am trying to draw it as a 5x5 grid of 'tiles', if you will. Any help would be appreciated :)
I've used p5.Image to create a picture based on the values in the file.
This is one way of writing the code:
let track;
let colors;
let img;

function preload() {
  // loadStrings() is asynchronous, so load the file in preload()
  track = loadStrings("track1.txt");
}

function setup() {
  createCanvas(100, 100);
  colors = [color(255, 0, 0), color(0, 255, 0), color(0, 0, 255)];
  // width = characters per line, height = number of lines
  img = createImage(track[0].length, track.length);
  img.loadPixels();
  for (let i = 0; i < track.length; i++) {      // i = row (y)
    for (let j = 0; j < track[i].length; j++) { // j = column (x)
      img.set(j, i, colors[int(track[i][j])]);
    }
  }
  img.updatePixels();
}

function draw() {
  background(220);
  image(img, 50, 50);
}
Well, you could probably split it up into arrays, and it would also help to have some sort of separator for the colors in track1.txt, like: 10, 30, 255\n ... r, g, b\n .... Right now you would have to use the packed rrrgggbbb form:
let colors = track.split("\n"); // makes every line its own array entry ("\n" = new line)

for (let x = 0; x < width; x++) {
  for (let y = 0; y < height; y++) {
    let currentCol = [];
    for (let i = 0; i < 9; i += 3) {
      // index one rrrgggbbb line per pixel, row-major
      // (the original had colors[x + y] and flagged it as uncertain)
      currentCol.push(parseInt(
        colors[x + y * width][0 + i] +
        colors[x + y * width][1 + i] +
        colors[x + y * width][2 + i], 10));
    }
    set(x, y, color(currentCol[0], currentCol[1], currentCol[2]));
  }
}
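If the file did use a separator like that, the parsing gets much simpler. A minimal sketch, assuming track1.txt holds one "r, g, b" line per pixel in row-major order (the file name and layout here are assumptions, not the asker's actual format):

let lines;

function preload() {
  lines = loadStrings("track1.txt"); // one "r, g, b" line per pixel, row-major
}

function setup() {
  createCanvas(5, 5);
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      // split "10, 30, 255" into [10, 30, 255]
      let rgb = lines[x + y * width].split(",").map(s => parseInt(s, 10));
      set(x, y, color(rgb[0], rgb[1], rgb[2]));
    }
  }
  updatePixels(); // set() on the canvas only takes effect after updatePixels()
}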
I also made a function with a slightly different formula, which might work better (I am not sure), but this is the actual formula for indexing into a flattened pixel array:
function getColor(x, y, allColorsArr) {
  let _col = []; // current color; underscore so you don't accidentally clobber your own variable
  for (let i = 0; i < 3; i++) {
    _col.push(
      allColorsArr[x + y * width][0 + i * 3] +
      allColorsArr[x + y * width][1 + i * 3] +
      allColorsArr[x + y * width][2 + i * 3]
    );
  }
  return {
    r: parseInt(_col[0], 10),
    g: parseInt(_col[1], 10),
    b: parseInt(_col[2], 10)
  }; // returning an object here just for the `.r`, `.g`, `.b` access
}
// you could do something like: let Colors = {red: 0, green: 1, blue: 2}
// col[Colors.red], col[Colors.green], col[Colors.blue] if you want
// example: let col = getColor(1, 0, track.split("\n"))
// example: stroke(col.r, col.g, col.b)
There is most likely a better way to do this, but at least this works...
I am currently developing an OCR for sudoku and I am trying to first get a clean black-and-white image. I first apply a grayscale conversion, then a median filter, then Otsu's algorithm.
My problem is that my results are better when I don't apply my median filter.
Does anyone know why?
(Images: the starting image, the result with my median filter, and the result without my median filter.)
Here is the code for my median filter:
void median_filter(SDL_Surface *image) {
    int width = image->w;
    int height = image->h;
    for (int y = 1; y < height - 1; y++) {
        for (int x = 1; x < width - 1; x++) {
            Uint8 gray_values[9];
            int index = 0;
            for (int dy = -1; dy <= 1; dy++) {
                for (int dx = -1; dx <= 1; dx++) {
                    int pixel_offset = (y+dy) * image->pitch + (x+dx) * 4;
                    Uint8 r = *(Uint8 *)((Uint8 *)image->pixels + pixel_offset);
                    Uint8 g = *(Uint8 *)((Uint8 *)image->pixels + pixel_offset + 1);
                    Uint8 b = *(Uint8 *)((Uint8 *)image->pixels + pixel_offset + 2);
                    gray_values[index++] = (0.3 * r) + (0.59 * g) + (0.11 * b);
                }
            }
            qsort(gray_values, 9, sizeof(Uint8), cmpfunc);
            Uint8 gray = gray_values[4];
            int pixel_offset = y * image->pitch + x * 4;
            *(Uint8 *)((Uint8 *)image->pixels + pixel_offset) = gray;
            *(Uint8 *)((Uint8 *)image->pixels + pixel_offset + 1) = gray;
            *(Uint8 *)((Uint8 *)image->pixels + pixel_offset + 2) = gray;
        }
    }
}
You are filtering with some neighbour values that were already filtered: the three pixels above and the one on the left.
You need to write the median values into a new image. This must also include the unfiltered pixels around the edges.
If you are applying multiple filters, use one buffer as the source and another as the destination, then swap their roles for the next filter application (by passing the two buffers to the filter functions).
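A minimal sketch of the two-buffer idea, assuming a plain single-channel grayscale buffer rather than an SDL surface (the buffer layout, and taking the question's cmpfunc comparator as a parameter, are assumptions):

#include <stdlib.h> // qsort
#include <string.h> // memcpy

// Median-filter src into dst; both are width*height single-channel buffers.
// Because dst is a separate buffer, already-filtered pixels are never re-read.
void median_filter_buf(const unsigned char *src, unsigned char *dst,
                       int width, int height,
                       int (*cmpfunc)(const void *, const void *)) {
    memcpy(dst, src, (size_t)width * height); // keep the unfiltered border pixels
    for (int y = 1; y < height - 1; y++) {
        for (int x = 1; x < width - 1; x++) {
            unsigned char window[9];
            int index = 0;
            for (int dy = -1; dy <= 1; dy++)
                for (int dx = -1; dx <= 1; dx++)
                    window[index++] = src[(y + dy) * width + (x + dx)];
            qsort(window, 9, sizeof(unsigned char), cmpfunc);
            dst[y * width + x] = window[4]; // median of the 3x3 neighbourhood
        }
    }
}

For a chain of filters you would then alternate directions: filter from buffer a into buffer b, run the next filter reading from b and writing back into a, and so on.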
I am trying to make a blur filter in C that takes the neighboring pixels of the main pixel, takes the average of the RGB values, and stores it in the temp array, then changes the image using the temp array values. It seems correct to me, but it is not working as intended, giving an output that is only very slightly blurred. I really don't see my mistake and would be very thankful if someone helped. Sorry if I did something horrible; I started learning C last week.
I checked this post:
Blurring an Image in c pixel by pixel - special cases
but I did not see where I went wrong.
I'm working with this data struct:
typedef struct
{
    BYTE rgbtBlue;
    BYTE rgbtGreen;
    BYTE rgbtRed;
} RGBTRIPLE;
void blur(int height, int width, RGBTRIPLE image[height][width])
{
    // ints to use later
    int j;
    int p;
    RGBTRIPLE temp[height][width];
    for(int n = 0; n < height; n++) // loop to check every pixel
    {
        for(int k = 0; k < width; k++)
        {
            int widx = 3;
            int hghtx = 3;
            // conditionals for border cases
            int y = 0;
            if(n == 0)
            {
                p = 0;
                hghtx = 2;
            }
            if(n == height - 1)
            {
                p = -1;
                hghtx = 2;
            }
            if(k == 0)
            {
                j = 0;
                widx = 2;
            }
            if(k == width - 1)
            {
                j = -1;
                widx = 2;
            }
            for(int u = 0; u < hghtx; u++) // matrix of pixels around the main pixel using the conditionals gathered before
                for(int i = 0; i < widx; i++)
                    if(y == 1) // takes the average of color and stores it in the RGB temp
                    {
                        temp[n][k].rgbtGreen = temp[n][k].rgbtGreen + image[n + p + u][k + j + i].rgbtGreen / (hghtx * widx);
                        temp[n][k].rgbtRed = temp[n][k].rgbtRed + image[n + p + u][k + j + i].rgbtRed / (hghtx * widx);
                        temp[n][k].rgbtBlue = temp[n][k].rgbtBlue + image[n + p + u][k + j + i].rgbtBlue / (hghtx * widx);
                    }
                    else // get first value of temp
                    {
                        temp[n][k].rgbtGreen = (image[n + p + u][k + j + i].rgbtGreen) / (hghtx * widx);
                        temp[n][k].rgbtRed = (image[n + p + u][k + j + i].rgbtRed) / (hghtx * widx);
                        temp[n][k].rgbtBlue = (image[n + p + u][k + j + i].rgbtBlue) / (hghtx * widx);
                        y++;
                    }
        }
    }
    // changes the original image to the blurred one
    for(int n = 0; n < height; n++)
        for(int k = 0; k < width; k++)
            image[n][k] = temp[n][k];
}
I think it's a combination of things.
If the code worked the way you expect, you would still be doing a blur of just 3x3 pixels, and that is hardly noticeable, especially on large images (I'm pretty sure it would be unnoticeable on a 4000x3000 image).
There are some problems with the code.
As @Fe2O3 says, at the end of the first line, widx will change to 2 and stay 2 for the rest of the image.
You are reading from temp[][] without initializing it. I think that if you compile that in release mode (not debug), temp[][] will contain random data and not all zeros as you probably expect (as @WeatherWane pointed out).
The way you calculate the average of the pixels is weird. If you use a 3x3 matrix of pixels, each pixel value should be divided by 9 in the final sum. But you divide the first pixel nine times by 2 (in effect doing /256), the second one eight times by 2 (so it's pixel/128), etc., until the last one is divided by 2. So basically, it's mostly the value of the bottom right pixel.
Also, since your RGB values are just bytes, you may want to divide them first and only then add them, because otherwise you'll get overflows with wild results.
Try using a debugger to see the values you are actually calculating. It can be quite an eye opener :)
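For what it's worth, here is a minimal sketch of the accumulate-then-divide version of the same 3x3 box blur. This is a rewrite under the question's RGBTRIPLE definition, not a patch of the original code: sum the neighbours into plain ints, count only the ones that are actually inside the image, and divide once at the end.

void blur(int height, int width, RGBTRIPLE image[height][width])
{
    RGBTRIPLE temp[height][width];
    for (int n = 0; n < height; n++)
    {
        for (int k = 0; k < width; k++)
        {
            int r = 0, g = 0, b = 0, count = 0;
            // walk the up-to-3x3 neighbourhood, skipping out-of-bounds pixels
            for (int dn = -1; dn <= 1; dn++)
            {
                for (int dk = -1; dk <= 1; dk++)
                {
                    int y = n + dn, x = k + dk;
                    if (y < 0 || y >= height || x < 0 || x >= width)
                        continue;
                    r += image[y][x].rgbtRed;
                    g += image[y][x].rgbtGreen;
                    b += image[y][x].rgbtBlue;
                    count++;
                }
            }
            // divide once at the end: int accumulators avoid byte overflow,
            // and a single division avoids truncating each term separately
            temp[n][k].rgbtRed   = r / count;
            temp[n][k].rgbtGreen = g / count;
            temp[n][k].rgbtBlue  = b / count;
        }
    }
    // copy the blurred result back over the original image
    for (int n = 0; n < height; n++)
        for (int k = 0; k < width; k++)
            image[n][k] = temp[n][k];
}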
// this method will be called once per frame
(plane as any).tick = (delta: number) => {
  let pos = geometry.getAttribute("position");
  let pa = pos.array as Float32Array;

  // rebuild the flat position array as a list of Vector3s
  let vectorArr: THREE.Vector3[] = [];
  for (let i = 0; i < pa.length; i += 3) {
    vectorArr.push(new THREE.Vector3(pa[i], pa[i + 1], pa[i + 2]));
  }

  // scroll each vertex, wrapping it back to x = 40 once it passes x = -40
  for (let vect of vectorArr) {
    vect.x -= 5 * delta;
    if (vect.x < -40) {
      vect.x = 40;
      removeArr.push(vect); // removeArr: presumably declared elsewhere in the full code
    }
  }

  // write the modified vectors back into the position attribute
  vectorArr.map((vect, i) => {
    pa[3 * i] = vect.x;
    pa[3 * i + 1] = vect.y;
    pa[3 * i + 2] = vect.z;
  });
  pos.needsUpdate = true;
}
The above code is used for modifying the plane. I'm basically just trying to reset the vectors that are past x = -40, but that's leaving behind some sort of 'connecting plane' stretched across the geometry. The image below is an example; the material is double-sided (but changing that still leaves behind the weird connecting plane).
What it looks like normally, without the vect.x -= 5 * delta line.
I'm basically just trying to create an infinite scrolling landscape, so if you have a good way to do this in Three.js apart from my code, please let me know!
I have an array of images; when I click on one of them, I want it to loop and create a pattern (this part already works). When I want to get the last clicked image out of my array, I get a NullPointerException.
PImage[] patronen = new PImage[7];
int pLength = patronen.length;
PImage selectedPatroon = patronen[patronen.length-1];
void setup(){
  size(1024, 768);
}

void draw(){
  createPGrid();
  image(selectedPatroon, xPos, yPos);
}

void createPGrid(){
  for(int j = 0; j < gpLength; j++){
    // Create grid
    xPos = xOffset + ((j % cols) * (size+padding));
    yPos = yOffset + ((j / cols) * (size+padding));
    // Thumbs
    patronen[j] = loadImage( j + ".png");
    image(patronen[j], xPos, yPos);
    // Check if thumb is clicked
    if((mouseX >= xPos && mouseX <= xPos+size) &&
       (mouseY >= yPos && mouseY <= yPos+size)){
      if (mousePressed){
        // grid patterns
        xPos = 0;
        yPos = 0;
        // Loop pattern
        while( yPos < height ){
          while( xPos < width ){
            patronen[j] = loadImage(j + "groot" + ".png");
            selectedPatroon = patronen[j];
            xPos += 500;
          }
          yPos += 500;
          xPos = 0;
        }
        rect(xPos, yPos, size, size);
      }
    }
  }
}
EDIT: The thing is, it works perfectly without
PImage selectedPatroon = patronen[patronen.length-1];
but then the looped pattern is drawn above all my other functions, and I want it to be under them.
You are creating an array at the top of your code and then reading the last element of that array, but you're not populating the array first.
PImage[] patronen = new PImage[7]; // <<< blank 7-element array
int pLength = patronen.length;
PImage selectedPatroon = patronen[patronen.length-1]; // still null: nothing has been loaded yet
To answer the questions you've put in the comments: you cannot populate an array of PImages with ints; those are different types. Try something like this in setup():
PImage[] patronen = new PImage[7];
for(int i = 0; i < patronen.length; i++){
  patronen[i] = loadImage("image" + i + ".png"); // or whatever format your filenames are
}
The loadImage(String name) function returns a PImage, so you can insert it directly into your array. Just trying to insert the numbers won't work because the computer doesn't know that the numbers represent the filename without you telling it with loadImage().
I've been using the FJCore library in a Silverlight project to help with some realtime image processing, and I'm trying to figure out how to get a tad more compression and performance out of the library. Now, as I understand it, the JPEG standard allows you to specify a chroma subsampling ratio (see http://en.wikipedia.org/wiki/Chroma_subsampling and http://en.wikipedia.org/wiki/Jpeg); and it appears that this is supposed to be implemented in the FJCore library using the HsampFactor and VsampFactor arrays:
public static readonly byte[] HsampFactor = { 1, 1, 1 };
public static readonly byte[] VsampFactor = { 1, 1, 1 };
However, I'm having a hard time figuring out how to use them. It looks to me like the current values are supposed to represent 4:4:4 subsampling (i.e., no subsampling at all), and that if I wanted to get 4:1:1 subsampling, the right values would be something like this:
public static readonly byte[] HsampFactor = { 2, 1, 1 };
public static readonly byte[] VsampFactor = { 2, 1, 1 };
At least, that's the way that other similar libraries use these values (for instance, see the example code here for libjpeg).
However, neither the above values of {2, 1, 1} nor any other set of values that I've tried besides {1, 1, 1} produces a legible image. Nor, in looking at the code, does it seem like that's the way it's written. But for the life of me, I can't figure out what the FJCore code is actually trying to do. It seems like it's just using the sample factors to repeat operations that it's already done; if I didn't know better, I'd say that it was a bug. But this is a fairly established library, based on some fairly well established Java code, so I'd be surprised if that were the case.
Does anybody have any suggestions for how to use these values to get 4:2:2 or 4:1:1 chroma subsampling?
For what it's worth, here's the relevant code from the JpegEncoder class:
for (comp = 0; comp < _input.Image.ComponentCount; comp++)
{
    Width = _input.BlockWidth[comp];
    Height = _input.BlockHeight[comp];
    inputArray = _input.Image.Raster[comp];

    for (i = 0; i < _input.VsampFactor[comp]; i++)
    {
        for (j = 0; j < _input.HsampFactor[comp]; j++)
        {
            xblockoffset = j * 8;
            yblockoffset = i * 8;
            for (a = 0; a < 8; a++)
            {
                // set Y value. check bounds
                int y = ypos + yblockoffset + a;
                if (y >= _height) break;
                for (b = 0; b < 8; b++)
                {
                    int x = xpos + xblockoffset + b;
                    if (x >= _width) break;
                    dctArray1[a, b] = inputArray[x, y];
                }
            }
            dctArray2 = _dct.FastFDCT(dctArray1);
            dctArray3 = _dct.QuantizeBlock(dctArray2, FrameDefaults.QtableNumber[comp]);
            _huf.HuffmanBlockEncoder(buffer, dctArray3, lastDCvalue[comp], FrameDefaults.DCtableNumber[comp], FrameDefaults.ACtableNumber[comp]);
            lastDCvalue[comp] = dctArray3[0];
        }
    }
}
And notice that in the i & j loops, they're not controlling any kind of pixel skipping: if HsampFactor[0] is set to two, it's just grabbing two blocks instead of one.
I figured it out. I thought that by setting the sampling factors, you were telling the library to subsample the raster components itself. Turns out that when you set the sampling factors, you're actually telling the library the relative size of the raster components that you're providing. In other words, you need to do the chroma subsampling of the image yourself, before you ever submit it to the FJCore library for compression. Something like this is what it's looking for:
private byte[][,] GetSubsampledRaster()
{
    byte[][,] raster = new byte[3][,];
    raster[Y] = new byte[width / hSampleFactor[Y], height / vSampleFactor[Y]];
    raster[Cb] = new byte[width / hSampleFactor[Cb], height / vSampleFactor[Cb]];
    raster[Cr] = new byte[width / hSampleFactor[Cr], height / vSampleFactor[Cr]];

    int rgbaPos = 0;
    for (short y = 0; y < height; y++)
    {
        int Yy = y / vSampleFactor[Y];
        int Cby = y / vSampleFactor[Cb];
        int Cry = y / vSampleFactor[Cr];
        int Yx = 0, Cbx = 0, Crx = 0;
        for (short x = 0; x < width; x++)
        {
            // Convert to YCbCr colorspace (in place: r, g, b now hold Y, Cb, Cr).
            byte b = RgbaSample[rgbaPos++];
            byte g = RgbaSample[rgbaPos++];
            byte r = RgbaSample[rgbaPos++];
            YCbCr.fromRGB(ref r, ref g, ref b);

            // Only include the byte in question in the raster if it matches the appropriate sampling factor.
            if (IncludeInSample(Y, x, y))
            {
                raster[Y][Yx++, Yy] = r;
            }
            if (IncludeInSample(Cb, x, y))
            {
                raster[Cb][Cbx++, Cby] = g;
            }
            if (IncludeInSample(Cr, x, y))
            {
                raster[Cr][Crx++, Cry] = b;
            }

            // For YCbCr, we ignore the Alpha byte of the RGBA byte structure, so advance beyond it.
            rgbaPos++;
        }
    }
    return raster;
}

static private bool IncludeInSample(int slice, short x, short y)
{
    // Hopefully this gets inlined . . .
    return ((x % hSampleFactor[slice]) == 0) && ((y % vSampleFactor[slice]) == 0);
}
There might be additional ways to optimize this, but it's working for now.