Integer conversion to hexadecimal string in C without sprintf/printf [duplicate]

This question already has answers here:
How can I convert an integer to a hexadecimal string in C?
(7 answers)
Closed 5 years ago.
I have these input variables:
uint16 temperature = 0x1f12;
uint8 array[8] = {0,0,0,...0}
And I want to have
array[0] = '1';
array[1] = 'f';
array[2] = '1';
array[3] = '2';
array[4] = '\0';
array[5] = '\0';
array[6] = '\0';
array[7] = '\0';
However, due to memory constraints (I'm working with microcontrollers!) I need to avoid functions such as sprintf, printf, puts, etc.
How should I do this?
Best regards,

Use recursion to determine 1 hex digit at a time.
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

// print digit at the end and return the next address
static char *itohexa_helper(char *dest, unsigned x) {
    if (x >= 16) {
        dest = itohexa_helper(dest, x/16);
    }
    *dest++ = "0123456789abcdef"[x & 15];
    return dest;
}

char *itohexa(char *dest, unsigned x) {
    *itohexa_helper(dest, x) = '\0';
    return dest;
}

int main(void) {
    char array[8];
    uint16_t temperature = 0x1f11;
    puts(itohexa(array, temperature));
    puts(itohexa(array, 0));
    puts(itohexa(array, 0x1234567));
    puts(itohexa(array, UINT_MAX & 0xFFFFFFF));
}
Output
1f11
0
1234567
fffffff
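If you want exactly the fixed layout from the question (always four hex digits for a 16-bit value, remaining slots left as '\0'), a minimal non-recursive sketch along the same lines could look like this; the function name u16_to_hex and the 8-byte output buffer are assumptions taken from the question, not part of the answer above.
#include <stdint.h>

/* Minimal sketch, assuming the caller provides an 8-byte buffer as in the
   question: writes exactly four hex digits, leaves the rest as '\0'. */
static void u16_to_hex(uint16_t value, char out[8]) {
    static const char digits[] = "0123456789abcdef";
    for (int i = 0; i < 4; i++) {
        /* Extract nibbles from most significant to least significant. */
        out[i] = digits[(value >> (12 - 4 * i)) & 0xF];
    }
    for (int i = 4; i < 8; i++) {
        out[i] = '\0';
    }
}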

This code uses only 8 additional bytes of stack (for int i and int j).
int i;
for (i = 0; i < 8; i++) {
    array[7 - i] = temperature % 16;
    temperature /= 16;
    if (temperature == 0)
        break;
}
if (i == 8)
    i--;
int j;
for (j = 0; j <= i; j++)
    array[j] = array[7 - i + j];
for (j = i + 1; j < 8; j++)
    array[j] = 0;
for (j = 0; j < 8; j++)
    if (array[j] < 10)
        array[j] += '0';
    else
        array[j] += 'a' - 10;
This code first converts temperature = 0x1f12 to array[8] = { 0, 0, 0, 0, 1, 15, 1, 2}.
Then shifts the elements of array so that it becomes array[8] = { 1, 15, 1, 2, 0, 0, 0, 0 }.
And then converts the numbers to corresponding characters: array[8] = { '1', 'f', '1', '2', '0', '0', '0', '0' }.
Note also that this if condition
if (i == 8)
    i--;
is never met, since the break in the first for loop always fires before i reaches 8, even for temperature >= 0x10000000. It's just there in the hope that it might help someone understand this code.

int convert()
{
    uint16_t temperature = 0x1f11;
    uint8_t array[8] = {0,0,0,0,0,0,0,0};
    int i = 0;
    for (i = 0; i < 8; i++) {
        array[i] = (0xf000 & temperature) >> 12;
        temperature <<= 4;
        printf("array[%d] = %x\n", i, array[i]);
    }
    return(0);
}
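Note that this version stores the raw nibble values 0..15 in array rather than the ASCII characters the question asks for (printf is only used here as a debug aid). A minimal sketch of the missing step, assuming the same 8-element array filled by the loop above:
#include <stdint.h>

/* Sketch: convert the nibble values 0..15 produced above into ASCII hex
   characters, in place. */
static void nibbles_to_ascii(uint8_t array[8]) {
    for (int i = 0; i < 8; i++) {
        array[i] = (uint8_t)(array[i] < 10 ? array[i] + '0' : array[i] - 10 + 'a');
    }
}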

Related

Radix Sort Float

I am trying to sort floats with radix. My current algorithm works with unsigned. For example, if I enter values 12, 100, 1 my sorted values are 1, 12, and 100. However, when I use a function to convert floats to ints back to floats after calling the radix sort, my values remain unsorted. They print as they were entered by the user.
I am unsure how to modify my current function to be able to sort floats with radix.
void rs(unsigned int *a, int c) {
    int i;
    int m = a[0];
    int bt = 0;
    unsigned int *b = malloc(0 * sizeof(int));
    for (i = 0; i < c; i++) {
        if (a[i] > m)
            m = a[i];
    }
    while ((m >> bt) > 0) {
        int buck[2] = { 0 };
        for (i = 0; i < c; i++) {
            buck[(a[i] >> bt) & 1]++;
        }
        for (i = 1; i < 2; i++) {
            buck[i] += buck[i-1];
        }
        for (i = c-1; i >= 0; i--) {
            b[--buck[(a[i] >> bt) & 1]] = a[i];
        }
        for (i = 0; i < c; i++) {
            a[i] = b[i];
        }
        bt++;
    }
    free(b);
}
The function I am using to transform floats to ints to floats is: Radix Sort for Floats
void rfloat(float* arr, size_t size) {
    assert(sizeof(unsigned) == sizeof(float) && sizeof(float) == 4);
    unsigned* d = malloc(size * sizeof(unsigned));
    for (size_t i = 0; i < size; i++) {
        // Interpret float as 32-bit unsigned.
        d[i] = *(unsigned*) &(arr[i]);
        // Flip all except top if top bit is set.
        d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
        // Flip top bit.
        d[i] ^= (1u << 31);
    }
    rs(d, size);
    // Inverse transform.
    for (size_t i = 0; i < size; i++) {
        d[i] ^= (1u << 31);
        d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
        arr[i] = *(float*) &(d[i]);
    }
    free(d);
}
There are multiple issues:
You use int all over the place where you should be using unsigned (for values) or size_t (for sizes/indices).
You allocate 0 bytes.
(m >> bt) > 0 doesn't work as a stop condition; shifting by a count equal to or greater than the width of the type is undefined.
After changing the index types to unsigned/size_t, loop boundaries like i >= 0 no longer work.
I took the liberty of fixing the above and choosing some better variable names:
#include <limits.h>
#include <stdlib.h>

void rs(unsigned int *a, size_t c) {
    size_t i;
    unsigned bit = 0;
    unsigned *b = malloc(c * sizeof(unsigned));
    unsigned m = a[0]; // Max element.
    for (i = 0; i < c; i++) {
        if (a[i] > m) m = a[i];
    }
    while (bit < CHAR_BIT*sizeof(m) && (m >> bit)) {
        size_t bucket_len[2] = { 0, 0 };
        for (i = 0; i < c; i++) bucket_len[(a[i] >> bit) & 1]++;
        size_t bucket_end[2] = {bucket_len[0], bucket_len[0] + bucket_len[1]};
        for (i = c; i-- > 0; ) {
            size_t j = --bucket_end[(a[i] >> bit) & 1];
            b[j] = a[i];
        }
        for (i = 0; i < c; i++) a[i] = b[i];
        bit++;
    }
    free(b);
}
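A minimal sketch of how the fixed rs() might be exercised through the asker's rfloat() wrapper; the test values and the assumption that both functions are compiled in the same program are mine:
#include <stdio.h>

/* Assumes rs() (fixed version above) and rfloat() (from the question)
   are defined elsewhere in the same program. */
void rs(unsigned int *a, size_t c);
void rfloat(float *arr, size_t size);

int main(void) {
    float values[] = { 12.0f, 100.0f, 1.0f, -3.5f, 0.25f };
    size_t n = sizeof values / sizeof values[0];

    rfloat(values, n);

    for (size_t i = 0; i < n; i++) {
        printf("%g\n", values[i]);  /* expected in ascending order */
    }
    return 0;
}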

How do I include a switch-case statement for this encryption/decryption code?

I just started using a microcontroller and I have to implement encryption/decryption on it. Sorry for the super long post.
This is the Python script, which does not need to be edited.
import os
import sys
import serial  # pyserial

DEVPATH = "/dev"
TTYPREFIX = "ttyACM"
INPUT = b"Hello!"
#OUTPUT = b"Ifmmp!"

if __name__ == '__main__':
    for tty in (os.path.join(DEVPATH, tty) for tty in os.listdir(DEVPATH) \
                if tty.startswith(TTYPREFIX)):
        try:
            ctt = serial.Serial(tty, timeout=1, writeTimeout=1)
        except serial.SerialException:
            continue
        ctt.flushInput()
        ctt.flushOutput()
        # print(ctt)
        try:
            ctt.write(INPUT)
        except serial.SerialTimeoutException:
            ctt.__exit__()
            continue
        for retry in range(3):  # Try three times to read connection test result
            ret = ctt.read(2*len(INPUT))
            print("ret: " + repr(ret))
            if INPUT in ret:
                sys.exit(0)
                break
        else:
            ctt.__exit__()
            continue
        break
    else:
        print("Failed")
        sys.exit(1)
This is the main.c file. I know that CDC_Device_BytesReceived will receive the input from the Python script, and if there is input, the inner while loop will run since Bytes will be greater than 0.
while (1)
{
    /* Check if data received */
    Bytes = CDC_Device_BytesReceived(&VirtualSerial_CDC_Interface);
    while (Bytes > 0)
    {
        /* Send data back to the host */
        ch = CDC_Device_ReceiveByte(&VirtualSerial_CDC_Interface);
        CDC_Device_SendByte(&VirtualSerial_CDC_Interface, ch);
        --Bytes;
    }
    CDC_Device_USBTask(&VirtualSerial_CDC_Interface);
}
return 0;
}
However, in the loop, I was tasked with adding a switch case so that it will switch between encryption and decryption. But I have no idea what kind of condition to use to differentiate between encryption and decryption.
This is the code for encryption.
int crypto_aead_encrypt(unsigned char* c, unsigned long long* clen,
                        const unsigned char* m, unsigned long long mlen,
                        const unsigned char* ad, unsigned long long adlen,
                        const unsigned char* nsec, const unsigned char* npub,
                        const unsigned char* k)
{
    int klen = CRYPTO_KEYBYTES; // 16 bytes
    int size = 320 / 8;         // 40 bytes
    int rate = 128 / 8;         // 16 bytes
    // int capacity = size - rate;
    // Permutation
    int a = 12;
    int b = 8;
    // Padding process appends a 1 to the associated data
    i64 s = adlen / rate + 1;
    // Padding process appends a 1 to the plain text
    i64 t = mlen / rate + 1;
    // Length = plaintext mod r
    // i64 l = mlen % rate;
    u8 S[size];
    // Resulting Padded associated data is split into s blocks of r bits
    u8 A[s * rate];
    // Resulting Padded plain text is split into t blocks of r bits
    u8 P[t * rate];
    i64 i, j;
    // Pad Associated Data
    for(i = 0; i < adlen; ++i)
    {
        A[i] = ad[i];
        A[adlen] = 0x80; // 128 bits
        // No Padding Applied
        for(i = adlen + 1; i < s * rate; ++i)
        {
            A[i] = 0;
        }
    }
    // Pad Plaintext
    for(i = 0; i < mlen; ++i)
    {
        P[i] = m[i];
        P[mlen] = 0x80; // 128 bits
        // No Padding Applied
        for(i = mlen + 1; i < t * rate; ++i)
        {
            P[i] = 0;
        }
    }
    // Initialization
    // IV = k || r || a || b || 0
    // S = IV || K || N
    S[0] = klen * 8;
    S[1] = rate * 8;
    S[2] = a;
    S[3] = b;
    // i < 40 - 2 * 16 = 8
    for(i = 4; i < size - 2 * klen; ++i)
    {
        // S[4] until S[7] = 0
        S[i] = 0;
    }
    // i < 16
    for(i = 0; i < klen; ++i)
    {
        // S[8] = k[0], S[9] = k[1] until S[23] = k[15]
        S[size - 2 * klen + i] = k[i];
    }
    // i < 16
    for(i = 0; i < klen; i++)
    {
        // S[24] = npub[0], S[25] = npub[1] until S[39] = npub[15]
        S[size - klen + i] = npub[i];
    }
    printstate("Initial Value: ", S);
    // S - state, 12-a - start, a - 12 rounds
    permutations(S, 12 - a, a);
    // i < 16
    for(i = 0; i < klen; ++i)
    {
        // S[24] ^= k[0], S[25] ^= k[1] until S[39] ^= k[15]
        S[size - klen + i] ^= k[i];
    }
    printstate("Initialization: ", S);
    // Process Associated Data
    if(adlen != 0)
    {
        // i < s = (adlen / rate + 1)
        for(i = 0; i < s; ++i)
        {
            // rate = 16
            for(j = 0; j < rate; ++i)
            {
                // S ^= A
                S[j] ^= A[i * rate + j];
            }
            // S - state, 12-b - start, b - 8 rounds
            permutations(S, 12 - b, b);
        }
    }
    // S <- S ^= 1
    S[size - 1] ^= 1;
    printstate("Process Associated Data: ", S);
    // Process Plain Text
    for(i = 0; i < t - 1; ++i)
    {
        for(j = 0; j < rate; ++j)
        {
            // S <- S ^= P
            S[j] ^= P[i * rate + j];
            // c <- S
            c[i * rate + j] = S[j];
        }
        // S <- permutation b (S)
        permutations(S, 12 - b, b);
    }
    for(j = 0; j < rate; ++j)
    {
        // S <- S ^= Pt
        S[j] ^= P[(t-1) * rate + j];
    }
    for(j = 0; j < 1; ++j);
    {
        // C <- S
        // Bitstring S truncated to the first (most significant) k bits
        c[(t - 1) * rate + j] = S[j];
    }
    printstate("Process Plaintext: ", S);
    // Finalization
    for(i = 0; i < klen; ++i)
    {
        S[rate + i] ^= k[i];
    }
    permutations(S, 12 - a, a);
    for(i = 0; i < klen; ++i)
    {
        // T <- S ^= k
        // Bitstring S truncated to the last (least significant) k bits
        S[size - klen + i] ^= k[i];
    }
    printstate("Finalization: ", S);
    // Return Cipher Text & Tag
    for(i = 0; i < klen; ++i)
    {
        c[mlen + i] = S[size - klen + i];
    }
    *clen = mlen + klen;
    return 0;
}
And here is the code for decryption:
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec, const unsigned char *c,
                        unsigned long long clen, const unsigned char *ad,
                        unsigned long long adlen, const unsigned char *npub,
                        const unsigned char *k)
{
    *mlen = 0;
    if (clen < CRYPTO_KEYBYTES)
        return -1;
    int klen = CRYPTO_KEYBYTES;
    // int nlen = CRYPTO_NPUBBYTES;
    int size = 320 / 8;
    int rate = 128 / 8;
    // int capacity = size - rate;
    int a = 12;
    int b = 8;
    i64 s = adlen / rate + 1;
    i64 t = (clen - klen) / rate + 1;
    i64 l = (clen - klen) % rate;
    u8 S[size];
    u8 A[s * rate];
    u8 M[t * rate];
    i64 i, j;
    // pad associated data
    for (i = 0; i < adlen; ++i)
    {
        A[i] = ad[i];
    }
    A[adlen] = 0x80;
    for (i = adlen + 1; i < s * rate; ++i)
    {
        A[i] = 0;
    }
    // initialization
    S[0] = klen * 8;
    S[1] = rate * 8;
    S[2] = a;
    S[3] = b;
    for (i = 4; i < size - 2 * klen; ++i)
    {
        S[i] = 0;
    }
    for (i = 0; i < klen; ++i)
    {
        S[size - 2 * klen + i] = k[i];
    }
    for (i = 0; i < klen; ++i)
    {
        S[size - klen + i] = npub[i];
    }
    printstate("initial value:", S);
    permutations(S, 12 - a, a);
    for (i = 0; i < klen; ++i)
    {
        S[size - klen + i] ^= k[i];
    }
    printstate("initialization:", S);
    // process associated data
    if (adlen)
    {
        for (i = 0; i < s; ++i)
        {
            for (j = 0; j < rate; ++j)
            {
                S[j] ^= A[i * rate + j];
            }
            permutations(S, 12 - b, b);
        }
    }
    S[size - 1] ^= 1;
    printstate("process associated data:", S);
    // process plaintext
    for (i = 0; i < t - 1; ++i)
    {
        for (j = 0; j < rate; ++j)
        {
            M[i * rate + j] = S[j] ^ c[i * rate + j];
            S[j] = c[i * rate + j];
        }
        permutations(S, 12 - b, b);
    }
    for (j = 0; j < l; ++j)
    {
        M[(t - 1) * rate + j] = S[j] ^ c[(t - 1) * rate + j];
    }
    for (j = 0; j < l; ++j)
    {
        S[j] = c[(t - 1) * rate + j];
        S[l] ^= 0x80;
    }
    printstate("process plaintext:", S);
    // finalization
    for (i = 0; i < klen; ++i)
    {
        S[rate + i] ^= k[i];
    }
    permutations(S, 12 - a, a);
    for (i = 0; i < klen; ++i)
    {
        S[size - klen + i] ^= k[i];
    }
    printstate("finalization:", S);
    // return -1 if verification fails
    for (i = 0; i < klen; ++i)
    {
        if (c[clen - klen + i] != S[size - klen + i])
        {
            return -1;
        }
    }
    // return plaintext
    *mlen = clen - klen;
    for (i = 0; i < *mlen; ++i)
    {
        m[i] = M[i];
    }
    return 0;
}
Thanks for the help in advance, I am really clueless right now.
However, in the loop, I was tasked with adding a switch case so that it will switch between encryption and decryption. But I have no idea what kind of condition to use to differentiate between encryption and decryption.
According to your comments, the calls for encryption and decryption happen inside CDC_Device_ReceiveByte and CDC_Device_SendByte, which means you need to create a state machine for sending and receiving the bytes. The condition you would use for this is the return value of CDC_Device_BytesReceived.
You can create an enum for the states, and a simple struct for holding the current state along with any other pertinent information. You can create a function for the state machine that maps out what to do given the current state. Your while(1) loop will simply call the function to ensure the state machine moves along. You might implement that like this:
typedef enum {
    IDLE,
    DECRYPTING,
    ENCRYPTING,
} state_t;

typedef struct {
    state_t current_state;
} fsm_t;

fsm_t my_fsm = {0}; // initial state is idle

void myFSM(void) {
    switch (my_fsm.current_state) {
        case IDLE:
        {
            /* Check if data received */
            Bytes = CDC_Device_BytesReceived(&VirtualSerial_CDC_Interface);
            if (Bytes) my_fsm.current_state = DECRYPTING; // we have data, decrypt it
            break;
        }
        case DECRYPTING:
        {
            /* Send data back to the host */
            ch = CDC_Device_ReceiveByte(&VirtualSerial_CDC_Interface);
            my_fsm.current_state = ENCRYPTING; // encrypt byte that we are going to send to host
            break;
        }
        case ENCRYPTING:
        {
            CDC_Device_SendByte(&VirtualSerial_CDC_Interface, ch);
            --Bytes;
            if (Bytes) {
                my_fsm.current_state = DECRYPTING; // still have bytes left to decrypt
            }
            else my_fsm.current_state = IDLE;
            break;
        }
        default:
        {
            asm("nop"); // whoops
            break;
        }
    }
}
Now your loop is just
while (1) {
    myFSM();
}
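One detail the sketch above leaves implicit is where Bytes and ch live once they move out of the original while(1) body. A minimal assumption (mine, not part of the answer) is to make them file-scope, using LUFA-style types:
#include <stdint.h>

/* Assumption: shared state for myFSM(), since Bytes and ch are no longer
   locals of the while(1) loop. The types mirror what LUFA's CDC device API
   is commonly documented to return (a uint16_t count, an int16_t byte);
   adjust to whatever your headers actually declare. */
static uint16_t Bytes; /* bytes reported by CDC_Device_BytesReceived() */
static int16_t  ch;    /* last byte read with CDC_Device_ReceiveByte() */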

How to concatenate bit by bit in c?

I have a matrix of '1' and '0' characters with dimensions 8x8. I need to store the whole matrix in one unsigned long long variable, bit by bit. How can I do that?
For example, let's take the matrix of '1' and '0' that is 2x2:
The matrix 2x2:
1 0
0 1
The variable must contain: 1001 in bits.
The same example, but over the matrix 8x8 and unsigned long long variable.
That's what I've tried to do:
#include <stdio.h>

int main()
{
    unsigned long long result = 0;
    char matrix[8][8]; // let's say the matrix is already filled with '1' and '0'
    for (i = 0; i < SIZE; i++)
    {
        for (j = 0; j < SIZE; j++)
        {
            result = result | ((unsigned long long)(matrix[i][j] - '0'));
            result <<= 1;
        }
    }
    return 0;
}
Is this right? I implemented this nested loop in my algorithm and it didn't work properly.
Converting the text representation of an integer into its integer value can be done using strtoull().
char buf[sizeof(matrix)+1];
memcpy(buf, matrix, sizeof(matrix));
buf[sizeof(matrix)] = '\0';
result = strtoull(buf, NULL, 2);
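A minimal, self-contained sketch of that approach (the sample matrix contents are my own; the headers shown are what memcpy and strtoull need):
#include <stdio.h>
#include <stdlib.h>   /* strtoull */
#include <string.h>   /* memcpy */

int main(void) {
    /* Example 8x8 matrix of '0'/'1' characters; row 0 ends up in the
       most significant bits of the result. */
    char matrix[8][8] = {
        {'1','0','0','0','0','0','0','0'},
        {'0','1','0','0','0','0','0','0'},
        {'0','0','1','0','0','0','0','0'},
        {'0','0','0','1','0','0','0','0'},
        {'0','0','0','0','1','0','0','0'},
        {'0','0','0','0','0','1','0','0'},
        {'0','0','0','0','0','0','1','0'},
        {'0','0','0','0','0','0','0','1'},
    };

    char buf[sizeof matrix + 1];
    memcpy(buf, matrix, sizeof matrix);   /* rows are contiguous in memory */
    buf[sizeof matrix] = '\0';

    unsigned long long result = strtoull(buf, NULL, 2);
    printf("%llx\n", result);             /* prints 8040201008040201 */
    return 0;
}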
Try this:
#include <stdbool.h>

const int mx_size = 8;

int main() {
    unsigned long long result = 0;
    bool matrix[8][8] = {0}; // zero-initialize; the loop below fills the diagonal as an example
    for (int i = 0; i < mx_size; ++i)
        matrix[i][i] = 1;
    for (int i = 0; i < mx_size; i++) {
        for (int j = 0; j < mx_size; j++) {
            result |= (unsigned long long)matrix[i][j] << (i*mx_size + j);
        }
    }
    return 0;
}
Here you have the code (a bit more general version):
#include <stdio.h>
#include <stdint.h>

uint64_t convert(char matrix[8][8], int order, char zero)
{
    uint8_t byte;
    uint64_t result = 0;
    for (size_t row = 0; row < 8; row++)
    {
        byte = 0;
        for (size_t column = 0; column < 8; column++)
        {
            byte <<= 1;
            byte |= matrix[row][column] != zero ? 1 : 0; // anything != defined zero char is 1
        }
        if (order)
        {
            result |= (uint64_t)byte << (8 * row);
        }
        else
        {
            result |= (uint64_t)byte << (56 - 8 * row);
        }
    }
    return result;
}

int main(void) {
    char matrix[8][8] =
    {
        {'1','0','1','0','1','0','1','0'},
        {'0','1','0','1','0','1','0','1'},
        {'1','1','1','0','0','0','1','1'},
        {'0','0','0','1','1','1','0','0'},
        {'1','1','1','1','1','0','0','0'},
        {'0','0','0','0','1','1','1','1'},
        {'1','1','0','0','1','1','0','0'},
        {'0','0','1','1','0','0','1','1'},
    };
    unsigned long long result = convert(matrix, 0, '0');
    for (size_t index = 0; index < 64; index++)
        printf("%1d", !!(result & (1ULL << index)));
    printf("\n");
    result = convert(matrix, 1, '0');
    for (size_t index = 0; index < 64; index++)
        printf("%1d", !!(result & (1ULL << index)));
    printf("\n");
    return 0;
}

convert negative decimal number to binary number

I tried to convert a negative decimal number into a binary number. This code works perfectly on my computer, but it doesn't work on another computer.
I don't get how that is possible. What is wrong in my code?
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

void decTobin(int dec, int s)
{
    int b[s], i = 0;
    while (dec >= 0 && i != s - 1) {
        b[i] = dec % 2;
        i++;
        dec /= 2;
    }
    int j = i;
    printf("%d", dec);
    for (j = i - 1; j >= 0; j--) {
        if (b[j] == NULL)
            b[j] = 0;
        printf("%d", b[j]);
    }
}

void ndecTobin(int dec, int s)
{
    int b[s], i = 0, a[s], decimal, decimalvalue = 0, g;
    while (dec >= 0 && i != s-1) {
        b[i] = dec % 2;
        i++;
        dec /= 2;
    }
    int j = i;
    printf("%d", dec);
    for (j = i - 1; j >= 0; j--) {
        if (b[j] == NULL)
            b[j] = 0;
        printf("%d", b[j]);
    }
    printf("\n");
    a[s - 1] = dec;
    for (j = s - 2; j >= 0; j--) {
        a[j] = b[j];
    }
    for (j = s - 1; j >= 0; j--) {
        if (a[j] == 0)
            a[j] = 1;
        else
            a[j] = 0;
        printf("%d", a[j]);
    }
    for (g = 0; g < s; g++) {
        decimalvalue = pow(2, g) * a[g];
        decimal += decimalvalue;
    }
    decimal = decimal + 1;
    printf("\n%d\n", decimal);
    decTobin(decimal, s);
}

int main()
{
    int a, b;
    printf("enter a number: ");
    scanf(" %d", &a);
    printf("enter the base: ");
    scanf("%d", &b);
    ndecTobin(a, b);
}
decimal and int b[s] are not initialized.
By not initializing decimal to 0, it might happen to hold the value 0 on one machine one day and produce quite different results otherwise.
void decTobin(int dec, int s) {
    // The while loop does not set all of `b`, but the following for loop reads all of `b`.
    // int b[s], i = 0;
    int b[s];
    memset(b, 0, sizeof b); // b is a VLA, so it cannot take a `= { 0 }` initializer
    int i = 0;
    ...
}
void ndecTobin(int dec, int s) {
    int b[s], i = 0, a[s], decimal, decimalvalue = 0, g;
    decimal = 0;
    ...
    decimal += decimalvalue;
}
Minor points:
1) if (b[j] == NULL) b[j] = 0; is strange. NULL is best used as a pointer, yet the code compares b[j], an int, to a pointer. Further, since NULL typically has the arithmetic value of 0, the code effectively reads if (b[j] == 0) b[j] = 0;.
2) decTobin() is challenging to follow. It is certainly only meant for non-negative dec and s. Candidate simplification:
void decTobin(unsigned number, unsigned width) {
    int digit[width];
    for (unsigned i = width; i-- > 0; ) {
        digit[i] = number % 2;
        number /= 2;
    }
    printf("%u ", number); // assume this is for debug
    for (unsigned i = 0; i < width; i++) {
        printf("%u", digit[i]);
    }
}
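For illustration, a small driver (my addition, assuming it is linked with the simplified decTobin() above) and the output it would produce:
#include <stdio.h>

void decTobin(unsigned number, unsigned width); /* the simplified version above */

int main(void) {
    decTobin(5, 8);    /* prints "0 00000101" */
    putchar('\n');
    decTobin(300, 8);  /* prints "1 00101100": 300 needs 9 bits, so 1 is left over */
    putchar('\n');
    return 0;
}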
It looks like you are just printing the number in its binary representation. If so, this version would work:
void print_binary(size_t n) {
    /* buffer large enough to hold number to print */
    unsigned buf[CHAR_BIT * sizeof n] = {0};
    unsigned i = 0;
    /* handle special case user calls with n = 0 */
    if (n == 0) {
        puts("0");
        return;
    }
    while (n) {
        buf[i++] = n % 2;
        n /= 2;
    }
    /* print buffer backwards for binary representation */
    do {
        printf("%u", buf[--i]);
    } while (i != 0);
}
If you don't like the buffer, you can also do it using recursion like this:
void using_recursion(size_t n)
{
    if (n > 1)
        using_recursion(n / 2);
    printf("%u", (unsigned)(n % 2)); // cast: n % 2 is a size_t, %u expects unsigned
}
Yet another way is to print starting from the most significant bit. This, however, introduces the issue of leading zeros, which the code below skips.
void print_binary2(size_t n) {
    /* do not print leading zeros */
    int i = (sizeof(n) * 8) - 1;
    while (i >= 0) {
        if ((n >> i) & 1)
            break;
        --i;
    }
    for (; i >= 0; --i)
        printf("%u", (unsigned)((n >> i) & 1)); // cast: the expression is a size_t
}
Different OS/processor combinations may result in C compilers that store various kinds of numeric variables in different numbers of bytes. For instance, when I first learned C (Turbo C on an 80386, DOS 5) an int was two bytes, but now, with gcc on 64-bit Linux, my int is apparently four bytes. You need to include some way to account for the actual byte length of the variable type: the unary operator sizeof(foo) (where foo is a type, in your case int) returns an unsigned integer value you can use to ensure you do the right number of bit shifts.
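A minimal sketch of that idea (CHAR_BIT from <limits.h> avoids hard-coding 8 bits per byte):
#include <limits.h>
#include <stdio.h>

int main(void) {
    /* Derive the bit width of int on this platform instead of assuming 16 or 32. */
    size_t int_bits = sizeof(int) * CHAR_BIT;
    printf("int is %zu bits wide here\n", int_bits);
    return 0;
}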

Stack around the variable 's' was corrupted

I got this error in my RC4 algorithm. It works well, but I get this error every time the message is too big, like 1000 kB. Here is the code:
char* rc4(const int* key, int key_size, char* buff, int buff_size){
    int i, j, k;
    int s[255], rk[255]; // rk = random_key
    char* encrypted = alloc_char_buffer(buff_size);
    for (i = 0; i < 255; i++){
        s[i] = i;
        rk[i] = key[i % key_size];
    }
    j = 0;
    for (i = 0; i < 255; i++){
        j = (j + s[j] + rk[i]) % 256;
        SWITCH(s + i, s + j);
    }
    i = 0;
    j = 0;
    for (k = 0; k < buff_size; k++){
        i = (i + 1) % 256;
        j = (j + s[i]) % 256;
        SWITCH(s + i, s + j);
        //try{
        //}
        //catch ()
        encrypted[k] = (char)(s[(s[i] + s[j]) % 256] ^ (int)buff[k]);
    }
    encrypted[buff_size] = 0;
    return encrypted;
}
At the end of the last loop I get this error. I think this is some type of buffer overflow error; the only variable able to do that is 'encrypted', but at the end of the loop the value of 'k' is exactly the same as 'buff_size', which is used to allocate memory for 'encrypted'. If someone can help, I'll thank you.
'encrypted' is not null-terminated, so if the string has 10 bytes I will allocate only 10 bytes, not 11 for the '\0'.
If you need it, here is the code for alloc_char_buffer(unsigned int):
char* alloc_char_buffer(unsigned int size){
    char* buff = NULL;
    buff = (char*)calloc(size + 1, sizeof(char));
    if (!buff)
        _error("program fail to alloc memory.");
    return buff;
}
SWITCH:
// swap two values (XOR trick)
void SWITCH(int *a, int *b){
    *(a) = *(a) ^ *(b); // a random number
    *(b) = *(a) ^ *(b); // get a
    *(a) = *(a) ^ *(b); // get b
}
char* encrypted = alloc_char_buffer(buff_size);
/* ... */
encrypted[buff_size] = 10;
Here is the problem. You allocate buff_size elements. Thus, the last valid index is buff_size-1, not buff_size.
Another issue:
j = (j + s[j] + rk[i]) % 256;
Thus the range of j is [0, 255], but the legal indices of s are only [0, 254]. You should either declare s as a 256-element array or review the algorithm implementation.
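A minimal sketch of that fix, showing only the lines inside rc4() that change (note, as an aside, that standard RC4 also sizes both arrays at 256, runs both setup loops over all 256 indices, and mixes in s[i] rather than s[j] in the key-scheduling step):
/* Fragment: with 256-entry arrays, every index produced by the % 256
   reductions stays in bounds. */
int s[256], rk[256]; // rk = random_key

for (i = 0; i < 256; i++){
    s[i] = i;
    rk[i] = key[i % key_size];
}
j = 0;
for (i = 0; i < 256; i++){
    j = (j + s[i] + rk[i]) % 256; // standard RC4 uses s[i] here, not s[j]
    SWITCH(s + i, s + j);
}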
The following line is creating the problem, as you are trying to access memory beyond what you allocated:
encrypted[buff_size] = 10;
Additionally, you could simply call calloc directly instead of writing your own alloc_char_buffer function; it allocates the memory and initializes it with 0:
calloc(buff_size, sizeof(char));
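A fragment showing that direct call with the usual failure check (the variable names are taken from the question):
char *encrypted = calloc(buff_size, sizeof(char));
if (!encrypted) {
    /* allocation failed; handle the error */
}
/* ... fill encrypted[0 .. buff_size-1]; do not write encrypted[buff_size] ... */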
