Is there a way to convert from UTF8 to ISO-8859-1? - c

My software is getting some strings in UTF-8 that I need to convert to ISO-8859-1. I know that the UTF-8 domain is bigger than that of ISO-8859-1. But the data in UTF-8 has been previously upconverted from ISO, so I should not miss anything.
I would like to know if there is an easy / direct way to convert from UTF8 to iso-8859-1.

Here is a function you might find useful: utf8_to_latin9(). It converts to ISO-8859-15 (including EURO, which ISO-8859-1 does not have), but also works correctly for the UTF-8->ISO-8859-1 conversion part of a ISO-8859-1->UTF-8->ISO-8859-1 round-trip.
The function ignores invalid code points similar to //IGNORE flag for iconv, but does not recompose decomposed UTF-8 sequences; that is, it won't turn U+006E U+0303 into U+00F1. I don't bother recomposing because iconv does not either.
The function is very careful about the string access. It will never scan beyond the buffer. The output buffer must be one byte longer than length, because it always appends the end-of-string NUL byte. The function returns the number of characters (bytes) in output, not including the end-of-string NUL byte.
/* UTF-8 to ISO-8859-1/ISO-8859-15 mapper.
 * Return 0..255 for valid ISO-8859-15 code points, 256 otherwise.
 */
static inline unsigned int to_latin9(const unsigned int code)
{
    /* The eight positions where ISO-8859-15 deviates from ISO-8859-1. */
    static const struct {
        unsigned int unicode;
        unsigned int latin9;
    } extras[] = {
        { 0x0152U, 188U }, /* U+0152 -> 0xBC: OE ligature */
        { 0x0153U, 189U }, /* U+0153 -> 0xBD: oe ligature */
        { 0x0160U, 166U }, /* U+0160 -> 0xA6: S with caron */
        { 0x0161U, 168U }, /* U+0161 -> 0xA8: s with caron */
        { 0x0178U, 190U }, /* U+0178 -> 0xBE: Y with diaeresis */
        { 0x017DU, 180U }, /* U+017D -> 0xB4: Z with caron */
        { 0x017EU, 184U }, /* U+017E -> 0xB8: z with caron */
        { 0x20ACU, 164U }, /* U+20AC -> 0xA4: Euro */
    };
    size_t i;

    /* Code points U+0000..U+00FF are identical in Unicode and Latin-9. */
    if (code < 256U)
        return code;

    for (i = 0; i < sizeof extras / sizeof extras[0]; i++)
        if (extras[i].unicode == code)
            return extras[i].latin9;

    return 256U; /* Not representable in ISO-8859-15. */
}
/* Convert an UTF-8 string to ISO-8859-15.
* All invalid sequences are ignored.
* Note: output == input is allowed,
* but input < output < input + length
* is not.
* Output has to have room for (length+1) chars, including the trailing NUL byte.
*/
/* Returns the number of bytes stored in output, excluding the trailing NUL
* that is always appended. The decoder never reads past input + length.
* It also accepts the obsolete 5- and 6-byte UTF-8 forms and does not reject
* overlong encodings (NOTE(review): it is not a validating decoder). Any
* decoded code point that to_latin9() cannot map is silently dropped, as are
* an incomplete sequence at the end of the buffer and stray continuation
* bytes. */
size_t utf8_to_latin9(char *const output, const char *const input, const size_t length)
{
unsigned char *out = (unsigned char *)output;
const unsigned char *in = (const unsigned char *)input;
const unsigned char *const end = (const unsigned char *)input + length;
unsigned int c;
/* One iteration per UTF-8 sequence: the lead byte selects the length,
* continuation bytes must match 10xxxxxx, and at most one output byte is
* emitted. Output never grows faster than input is consumed, which is why
* in-place conversion (output == input) is safe. */
while (in < end)
if (*in < 128)
*(out++) = *(in++); /* Valid codepoint */
else
if (*in < 192)
in++; /* 10000000 .. 10111111 are invalid */
else
if (*in < 224) { /* 110xxxxx 10xxxxxx */
if (in + 1 >= end)
break; /* Incomplete sequence at end of input: stop; tail bytes are dropped. */
if ((in[1] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x1FU)) << 6U)
| ((unsigned int)(in[1] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 2; /* Advance past lead + continuation bytes even if the sequence was rejected. */
} else
if (*in < 240) { /* 1110xxxx 10xxxxxx 10xxxxxx */
if (in + 2 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x0FU)) << 12U)
| (((unsigned int)(in[1] & 0x3FU)) << 6U)
| ((unsigned int)(in[2] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 3;
} else
if (*in < 248) { /* 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
if (in + 3 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U &&
(in[3] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x07U)) << 18U)
| (((unsigned int)(in[1] & 0x3FU)) << 12U)
| (((unsigned int)(in[2] & 0x3FU)) << 6U)
| ((unsigned int)(in[3] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 4;
} else
if (*in < 252) { /* 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx */
if (in + 4 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U &&
(in[3] & 192U) == 128U &&
(in[4] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x03U)) << 24U)
| (((unsigned int)(in[1] & 0x3FU)) << 18U)
| (((unsigned int)(in[2] & 0x3FU)) << 12U)
| (((unsigned int)(in[3] & 0x3FU)) << 6U)
| ((unsigned int)(in[4] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 5;
} else
if (*in < 254) { /* 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx */
if (in + 5 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U &&
(in[3] & 192U) == 128U &&
(in[4] & 192U) == 128U &&
(in[5] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x01U)) << 30U)
| (((unsigned int)(in[1] & 0x3FU)) << 24U)
| (((unsigned int)(in[2] & 0x3FU)) << 18U)
| (((unsigned int)(in[3] & 0x3FU)) << 12U)
| (((unsigned int)(in[4] & 0x3FU)) << 6U)
| ((unsigned int)(in[5] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 6;
} else
in++; /* 11111110 and 11111111 are invalid */
/* Terminate the output string. */
*out = '\0';
return (size_t)(out - (unsigned char *)output);
}
Note that you can add custom transliteration for specific code points in the to_latin9() function, but you are limited to one-character replacements.
As it is currently written, the function can do in-place conversion safely: input and output pointers can be the same. The output string will never be longer than the input string. If your input string has room for an extra byte (for example, it has the NUL terminating the string), you can safely use the above function to convert it from UTF-8 to ISO-8859-1/15. I deliberately wrote it this way, because it should save you some effort in an embedded environment, although this approach is a bit limited wrt. customization and extension.
Edit:
I included a pair of conversion functions in an edit to this answer for both Latin-1/9 to/from UTF-8 conversion (ISO-8859-1 or -15 to/from UTF-8); the main difference is that those functions return a dynamically allocated copy, and keep the original string intact.

iconv - perform character set conversion
size_t iconv(iconv_t cd,
char **inbuf, size_t *inbytesleft,
char **outbuf, size_t *outbytesleft);
iconv_t iconv_open(const char *tocode, const char *fromcode);
tocode is "ISO_8859-1" and fromcode is "UTF-8".
Working example:
#include <iconv.h>
#include <stdio.h>
int main (void) {
    /* Convert a UTF-8 string to ISO-8859-1 using iconv(3). */
    iconv_t conv = iconv_open("ISO_8859-1", "UTF-8");
    if (conv == (iconv_t) -1) {
        perror("iconv_open failed!");
        return 1;
    }

    char input[] = "Test äöü";
    char output[32];

    /* iconv() advances these cursors as it consumes/produces bytes. */
    char *src = &input[0];
    char *dst = &output[0];
    size_t src_left = sizeof(input) - 1;   /* input bytes, minus the NUL */
    size_t dst_left = sizeof(output) - 1;  /* reserve one byte for the NUL */

    do {
        if (iconv(conv, &src, &src_left, &dst, &dst_left) == (size_t) -1) {
            perror("iconv failed!");
            return 1;
        }
    } while (src_left > 0 && dst_left > 0);

    *dst = 0; /* terminate the converted string */
    iconv_close(conv);
    printf("%s -> %s\n", input, output);
    return 0;
}

The following example uses iconv library also.
It works even when you have a file (or input stream) that contains mixed UTF-8 and ISO-8859-1 characters (this could happen, for example, if you have a UTF-8 file and edit it in an environment that uses ISO-8859-1).
/* Convert up to `size` bytes of UTF-8 in `input` to ISO-8859-1 in `output`
 * using iconv(3).
 *
 * Bytes that are not valid UTF-8 (EILSEQ) and an incomplete trailing
 * sequence (EINVAL) are copied through verbatim, so input that is already
 * (or partially) ISO-8859-1 survives the conversion.
 *
 * `output` must have room for size + 1 bytes: at most `size` converted
 * bytes plus the NUL terminator.
 *
 * Returns 0 on success, -1 when conversion is impossible (iconv_open()
 * failure, output exhaustion, or an unexpected iconv error); on failure the
 * raw input is copied to output unchanged (NUL-terminated).
 */
int Utf8ToLatin1(char* input, char* output, size_t size)
{
    size_t in_left = size;
    size_t out_left = size;
    char *in_buf = input;
    char *out_buf = output;
    iconv_t cd = iconv_open("ISO_8859-1", "UTF-8");
    if (cd == (iconv_t)-1) {
        (void) fprintf(stderr, "iconv_open() failed, msg encoding will be kept!");
        /* Verbatim fallback; terminate explicitly — strncpy() would not
         * when the input fills the buffer. */
        memcpy(output, input, size);
        output[size] = '\0';
        return -1;
    }
    do {
        if (iconv(cd, &in_buf, &in_left, &out_buf, &out_left) == (size_t) -1) {
            if (errno == EILSEQ) {
                /* Input conversion stopped due to an input byte that
                 * does not belong to the input codeset: pass it through. */
                printf("Input conversion stopped due to an input byte that does not belong to the input codeset.\n");
                *out_buf= *in_buf;
                out_buf++ ;out_left--;
                in_buf++ ;in_left--;
            } else if (errno == E2BIG) {
                /* Input conversion stopped due to lack of space in
                 * the output buffer: keep the original encoding.
                 */
                printf("Input conversion stopped due to lack of space in the output buffer.\n");
                perror("iconv failed!, probably the encoding is already Latin, msg encoding will be kept!\n");
                memcpy(output, input, size);
                output[size] = '\0';
                iconv_close(cd); /* was leaked on this path */
                return -1;
            } else if (errno == EINVAL) {
                /* Input conversion stopped due to an incomplete
                 * character or shift sequence at the end of the
                 * input buffer: copy the byte through.
                 */
                printf("Input conversion stopped due to an incomplete character or shift sequence at the end of the input buffer.\n");
                *out_buf= *in_buf;
                out_buf++ ;out_left--;
                in_buf++ ;in_left--;
            } else {
                /* Unexpected errno: bail out instead of looping forever
                 * (the original made no progress here and never exited). */
                perror("iconv failed!");
                memcpy(output, input, size);
                output[size] = '\0';
                iconv_close(cd);
                return -1;
            }
        }
    } while (in_left > 0 && out_left > 0);
    *out_buf = 0;
    iconv_close(cd);
    printf("*********************************************************\n");
    /* BUG FIX: the format string has one %s but the original passed
     * (input, output), printing the unconverted input. */
    printf("ISO-8859-1:\n %s\n", output);
    return 0;
}

Related

Why does multibyte character to char32_t conversion use UTF-8 as the multibyte encoding instead of the locale-specific one?

I have been trying to convert Chinese character input from the Windows command prompt in Big5 to UTF-8 by first converting the received input to char32_t in UTF-32 encoding, then converting it to UTF-8. I've been calling the function mbrtoc32 from <uchar.h> to do this job, however it kept reporting "Encoding error".
The following is the conditions I have encountered:
Converting the sequence (Big5) to a wchar_t representation by mbstowcs is successful.
mbrtoc32 takes the multibyte sequence as UTF-8, though the locale is not. (Set to "", returns "Chinese (Traditional)_Hong Kong SAR.950" on my machine)
Below is the code I've been writing to try to debug my problem, however no success. It tries to convert the "香" Chinese character (U+9999) into the multibyte representation, then tries to convert the Big5 encoding of "香" (0xADBB) into wchar_t and char32_t. However, converting from multibyte (Big5) to char32_t returns encoding error. (In contradictory, inputting the UTF-8 sequence of "香" to mbrtoc32 does return 0x9999 successfully)
#include <uchar.h>
#include <stdio.h>
#include <locale.h>
#include <stdlib.h>
/* Conversion state shared by the c32rtomb()/mbrtoc32() calls below. */
mbstate_t state;
/* Demonstrates the reported problem: on Mingw-w64, c32rtomb()/mbrtoc32()
* use UTF-8 regardless of the locale, so decoding a Big5 byte sequence
* fails even though the locale-aware mbtowc() succeeds.
* NOTE(review): this snippet uses errno/perror but does not include
* <errno.h>; the calloc() results are not checked; and the success branch
* prints res_wc (set earlier by mbtowc) instead of res_c32. */
int main(void){
setlocale(LC_CTYPE, "");
printf("Your locale is: %s\n", setlocale(LC_CTYPE, NULL));
char32_t chi_c = 0x9999;
printf("Character U+9999 is 香\n");
char *mbc = (char *)calloc(32, sizeof(char));
size_t mb_len;
/* Mingw-w64 encodes to UTF-8 here, not to the locale charset. */
mb_len = c32rtomb(mbc, chi_c, &state);
int i;
printf("The multibyte representation of U+9999 is:\n");
// 0xE9A699, UTF-8
for (i = 0; i < mb_len; i++){
printf("%#2x\t", *(mbc + i));
}
char *src_mbs = (char *)calloc(32, sizeof(char));
// "香" in Big5 encoding
*(src_mbs + 0) = 0xad;
*(src_mbs + 1) = 0xbb;
wchar_t res_wc;
mbtowc(&res_wc, src_mbs, 32); // Success, res_wc == 0x9999
char32_t res_c32;
/* Fails on Mingw-w64: the bytes are Big5, but mbrtoc32 expects UTF-8. */
mb_len = mbrtoc32(&res_c32, src_mbs, (size_t)3, &state);
// Returns (size_t)-1, encoding error
if (mb_len == (size_t)-1){
perror("Encoding error");
return errno;
}
else {
printf("\nThe 32-bit character representation of U+9999 is:\n%#x", res_wc);
}
return 0;
}
I've also read documentation from cppreference.com, it said,
In any case, the multibyte character encoding used by this function is specified by the currently active C locale.
I expect mbrtoc32 to behave like mbtowc, which is converting the character from the locale-specific encoding to UTF-32 (in this case Big5 to UTF-32).
Is there any solutions to use mbrtoc32 to convert the multibyte character into char32_t without having the "Encoding error"?
P.S.: I'm using Mingw-64 on Windows 10, compiled with gcc.
I've found the problem. The Mingw-w64 I'm using is expecting all multi-byte string passed to mbrtoc32 and c32rtomb to be in UTF-8 encoding.
Code for mbrtoc32:
/* Minimal, stateless UTF-8 -> UTF-32 decoder (Mingw-w64's mbrtoc32).
 * NOTE(review): decodes UTF-8 regardless of the current locale; it does not
 * validate continuation bytes of 2- and 3-byte forms, nor reject overlong
 * encodings or surrogates.
 *
 * Returns the number of bytes consumed, 0 for the NUL terminator,
 * (size_t)-2 for an incomplete sequence, or (size_t)-1 with errno = EILSEQ
 * for an invalid lead byte.
 *
 * Fixes vs. the quoted source: the 4-byte branch read s[4] (one past the
 * sequence) instead of s[3]; the non-standard __UNUSED_PARAM macro is
 * replaced so the snippet compiles; n == 0 no longer dereferences s. */
size_t mbrtoc32 (char32_t *__restrict__ pc32,
const char *__restrict__ s,
size_t n,
mbstate_t *__restrict__ ps)
{
(void)ps; /* stateless: UTF-8 needs no shift state */
if (n == 0)
return (size_t)-2; /* nothing to examine yet */
if (*s == 0)
{
*pc32 = 0;
return 0;
}
/* ASCII character - high bit unset */
if ((*s & 0x80) == 0)
{
*pc32 = *s;
return 1;
}
/* Multibyte chars */
if ((*s & 0xE0) == 0xC0) /* 110xxxxx needs 2 bytes */
{
if (n < 2)
return (size_t)-2;
*pc32 = ((s[0] & 31) << 6) | (s[1] & 63);
return 2;
}
else if ((*s & 0xf0) == 0xE0) /* 1110xxxx needs 3 bytes */
{
if (n < 3)
return (size_t)-2;
*pc32 = ((s[0] & 15) << 12) | ((s[1] & 63) << 6) | (s[2] & 63);
return 3;
}
else if ((*s & 0xF8) == 0xF0) /* 11110xxx needs 4 bytes */
{
if (n < 4)
return (size_t)-2;
/* BUG FIX: the original read s[4] for the last continuation byte. */
*pc32 = ((s[0] & 7) << 18) | ((s[1] & 63) << 12) | ((s[2] & 63) << 6) | (s[3] & 63);
return 4;
}
errno = EILSEQ;
return (size_t)-1;
}
and for c32rtomb:
/* Minimal, stateless UTF-32 -> UTF-8 encoder (Mingw-w64's c32rtomb).
 * NOTE(review): always emits UTF-8 regardless of the current locale; it
 * also accepts surrogates and the obsolete 5-/6-byte ranges up to
 * 0x7FFFFFFF, which strict UTF-8 forbids.
 *
 * Returns the number of bytes written to s (1..4 for valid Unicode),
 * or (size_t)-1 with errno = EILSEQ for values above 0x1FFFFF.
 *
 * Fix vs. the quoted source: the non-standard __UNUSED_PARAM macro is
 * replaced so the snippet compiles stand-alone. */
size_t c32rtomb (char *__restrict__ s,
char32_t c32,
mbstate_t *__restrict__ ps)
{
(void)ps; /* stateless: UTF-8 needs no shift state */
if (c32 <= 0x7F) /* 7 bits needs 1 byte */
{
*s = (char)c32 & 0x7F;
return 1;
}
else if (c32 <= 0x7FF) /* 11 bits needs 2 bytes */
{
s[1] = 0x80 | (char)(c32 & 0x3F);
s[0] = 0xC0 | (char)(c32 >> 6);
return 2;
}
else if (c32 <= 0xFFFF) /* 16 bits needs 3 bytes */
{
s[2] = 0x80 | (char)(c32 & 0x3F);
s[1] = 0x80 | (char)((c32 >> 6) & 0x3F);
s[0] = 0xE0 | (char)(c32 >> 12);
return 3;
}
else if (c32 <= 0x1FFFFF) /* 21 bits needs 4 bytes */
{
s[3] = 0x80 | (char)(c32 & 0x3F);
s[2] = 0x80 | (char)((c32 >> 6) & 0x3F);
s[1] = 0x80 | (char)((c32 >> 12) & 0x3F);
s[0] = 0xF0 | (char)(c32 >> 18);
return 4;
}
errno = EILSEQ;
return (size_t)-1;
}
both of these functions expected the given multi-byte string to be in UTF-8 without considering the locale settings. Functions mbrtoc32 and c32rtomb on glibc simply call their wide-character counterparts to convert the characters. Since
wide-character conversions work properly on Mingw-w64, I used mbrtowc and wcrtomb to replace mbrtoc32 and c32rtomb respectively, the way glibc does:
#include <uchar.h>
#include <stdio.h>
#include <locale.h>
#include <stdlib.h>
/* Conversion state shared by the wcrtomb()/mbrtowc() calls below. */
mbstate_t state;
/* Workaround demo: use the locale-aware wide-character functions
* (wcrtomb/mbrtowc) in place of Mingw-w64's UTF-8-only c32rtomb/mbrtoc32.
* NOTE(review): `r_mbc` is unsigned char* but is initialized with a
* (char *) cast and passed to wcrtomb(), which takes char* — pointer-type
* mismatches a compiler will warn about; errno is used without <errno.h>;
* and (wchar_t *)&r_c32 aliases a 32-bit char32_t through a 16-bit
* Windows wchar_t, which only works for BMP values on little-endian
* targets — TODO confirm. */
int main(void){
setlocale(LC_CTYPE, "");
printf("Your locale is: %s\n", setlocale(LC_CTYPE, NULL));
char *src_mbs = "\xad\xbb"; // "香" in Big5 encoding
char32_t src_c32 = 0x9999; // "香" code point
unsigned char *r_mbc = (char *)calloc(32, sizeof(char));
if (r_mbc == NULL){
perror("Failed to allocate memory");
return errno;
}
/* Locale-aware: encodes to the locale charset (Big5 here), unlike c32rtomb. */
size_t mb_len = wcrtomb(r_mbc, (wchar_t)src_c32, &state); // Returns 0xADBB, Big5 of "香", OK
printf("Character U+9999 is %s, ( ", r_mbc);
for (int i = 0; i < mb_len; i++){
printf("%#hhx ", *(r_mbc + i));
}
printf(")\n");
// mb_len = c32rtomb(r_mbc, src_c32, &state); // Returns 0xE9A699, UTF-8 representation of "香", expected Big5
// printf("\nThe multibyte representation of U+9999 is:\n");
// for (i = 0; i < mb_len; i++){
// printf("%#hhX\t", *(r_mbc + i));
// }
char32_t r_c32 = 0;
// mb_len = mbrtoc32(&r_c32, src_mbs, (size_t)3, &state);
// Returns (size_t)-1, encoding error
/* Locale-aware decode of the Big5 bytes, replacing mbrtoc32. */
mb_len = mbrtowc((wchar_t *)&r_c32, src_mbs, (size_t)3, &state); // Returns 0x9999, OK
if (mb_len == (size_t)-1){
perror("Encoding error");
return errno;
}
else {
printf("\nThe 32-bit character representation of U+9999 is:\n%#x", r_c32);
}
return 0;
}

Use this char encoding function with only one parameter

After many search, I found the perfect function for my need here
Here is the code :
/* UTF-8 to ISO-8859-1/ISO-8859-15 mapper.
 * Return 0..255 for valid ISO-8859-15 code points, 256 otherwise.
 */
static inline unsigned int to_latin9(const unsigned int code)
{
    /* Code points U+0000..U+00FF coincide in Unicode and Latin-9. */
    if (code < 256U)
        return code;

    /* The eight positions where ISO-8859-15 deviates from ISO-8859-1. */
    if (code == 0x0152U) return 188U; /* OE ligature      -> 0xBC */
    if (code == 0x0153U) return 189U; /* oe ligature      -> 0xBD */
    if (code == 0x0160U) return 166U; /* S with caron     -> 0xA6 */
    if (code == 0x0161U) return 168U; /* s with caron     -> 0xA8 */
    if (code == 0x0178U) return 190U; /* Y with diaeresis -> 0xBE */
    if (code == 0x017DU) return 180U; /* Z with caron     -> 0xB4 */
    if (code == 0x017EU) return 184U; /* z with caron     -> 0xB8 */
    if (code == 0x20ACU) return 164U; /* Euro sign        -> 0xA4 */

    return 256U; /* not representable in Latin-9 */
}
/* Convert an UTF-8 string to ISO-8859-15.
* All invalid sequences are ignored.
* Note: output == input is allowed,
* but input < output < input + length
* is not.
* Output has to have room for (length+1) chars, including the trailing NUL byte.
*/
/* Returns the number of bytes stored in output, excluding the trailing NUL
* that is always appended. The decoder never reads past input + length;
* stray continuation bytes, incomplete trailing sequences, and code points
* to_latin9() cannot map are all silently dropped. NOTE(review): it also
* accepts the obsolete 5-/6-byte UTF-8 forms and overlong encodings, so it
* is not a validating decoder. */
size_t utf8_to_latin9(char *const output, const char *const input, const size_t length)
{
unsigned char *out = (unsigned char *)output;
const unsigned char *in = (const unsigned char *)input;
const unsigned char *const end = (const unsigned char *)input + length;
unsigned int c;
/* One iteration per UTF-8 sequence; at most one output byte is emitted per
* sequence, so output never outruns input — this is what makes in-place
* conversion (output == input) safe. */
while (in < end)
if (*in < 128)
*(out++) = *(in++); /* Valid codepoint */
else
if (*in < 192)
in++; /* 10000000 .. 10111111 are invalid */
else
if (*in < 224) { /* 110xxxxx 10xxxxxx */
if (in + 1 >= end)
break; /* incomplete sequence at end of input: stop; tail is dropped */
if ((in[1] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x1FU)) << 6U)
| ((unsigned int)(in[1] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 2; /* advance past the whole sequence even if it was rejected */
} else
if (*in < 240) { /* 1110xxxx 10xxxxxx 10xxxxxx */
if (in + 2 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x0FU)) << 12U)
| (((unsigned int)(in[1] & 0x3FU)) << 6U)
| ((unsigned int)(in[2] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 3;
} else
if (*in < 248) { /* 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
if (in + 3 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U &&
(in[3] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x07U)) << 18U)
| (((unsigned int)(in[1] & 0x3FU)) << 12U)
| (((unsigned int)(in[2] & 0x3FU)) << 6U)
| ((unsigned int)(in[3] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 4;
} else
if (*in < 252) { /* 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx */
if (in + 4 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U &&
(in[3] & 192U) == 128U &&
(in[4] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x03U)) << 24U)
| (((unsigned int)(in[1] & 0x3FU)) << 18U)
| (((unsigned int)(in[2] & 0x3FU)) << 12U)
| (((unsigned int)(in[3] & 0x3FU)) << 6U)
| ((unsigned int)(in[4] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 5;
} else
if (*in < 254) { /* 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx */
if (in + 5 >= end)
break;
if ((in[1] & 192U) == 128U &&
(in[2] & 192U) == 128U &&
(in[3] & 192U) == 128U &&
(in[4] & 192U) == 128U &&
(in[5] & 192U) == 128U) {
c = to_latin9( (((unsigned int)(in[0] & 0x01U)) << 30U)
| (((unsigned int)(in[1] & 0x3FU)) << 24U)
| (((unsigned int)(in[2] & 0x3FU)) << 18U)
| (((unsigned int)(in[3] & 0x3FU)) << 12U)
| (((unsigned int)(in[4] & 0x3FU)) << 6U)
| ((unsigned int)(in[5] & 0x3FU)) );
if (c < 256)
*(out++) = c;
}
in += 6;
} else
in++; /* 11111110 and 11111111 are invalid */
/* Terminate the output string. */
*out = '\0';
return (size_t)(out - (unsigned char *)output);
}
This work perfectly.
But this function take 1 buffer_input and return me a second buffer_output.
For optimize it, I would like use only one buffer_input and the function return me the buffer_input modified.
I don't know if it's possible. I have not enough knowledge for doing it myself (I have trouble to understanding this code).
Is it hard to edit this code to achieve what I want?
For the moment I use like this :
utf8_to_latin9(buffer_output, buffer_input, length)
After edit I would like use like this :
utf8_to_latin9(buffer_input, buffer_input, length)
or
utf8_to_latin9(buffer_input, length)
I have not corrected (or checked) your code for any problems, I just modified it to work in-place. Note my comment regarding the faulty conversion in utf8_to_latin9(), for one.
The conversion to in-place operation is really simple:
Switch to one buffer parameter, make out point at the beginning of the input buffer (instead of a separate output buffer):
< size_t utf8_to_latin9(char *const output, const char *const input, const size_t length)
< {
< unsigned char *out = (unsigned char *)output;
----
> size_t utf8_to_latin9(char *const input, const size_t length)
> {
> unsigned char *out = (unsigned char *)input;
At the end of the conversion, calculate the return value based on the input buffer (instead of the separate output buffer):
< return (size_t)(out - (unsigned char *)output);
----
> return (size_t)(out - (unsigned char *)input);
Since there is no way the UTF-8 input buffer could be consumed at a slower rate than the Latin9 output buffer is being filled, this is quite safe.
My test main():
/* Test driver for the in-place, single-buffer variant of
* utf8_to_latin9(buffer, length) described above (defined elsewhere).
* The input is "¤" (2 bytes) + "€" (3 bytes) + a 4-byte kanji; only the
* first two convert, so the expected result is 0xA4 0xA4 followed by the
* NUL terminator — the remaining dump shows untouched leftover bytes. */
int main()
{
char input[256] = {0};
/* ¤ *//* € *//* some kanji */
strcpy( input, "\xc2\xa4\xe2\x82\xac\xf0\xaf\xa4\xa9" );
utf8_to_latin9( input, 9 );
/* Dump all 9 original byte positions, including bytes past the new NUL. */
printf( "%hhx ", input[0] );
printf( "%hhx ", input[1] );
printf( "%hhx ", input[2] );
printf( "%hhx ", input[3] );
printf( "%hhx ", input[4] );
printf( "%hhx ", input[5] );
printf( "%hhx ", input[6] );
printf( "%hhx ", input[7] );
printf( "%hhx\n", input[8] );
return 0;
}
Output:
a4 a4 0 82 ac f0 af a4 a9
That is what I would expect (showcasing the conversion problem I commented upon).

How to iterate through unicode characters and print them on the screen with printf in C?

I want to iterate through all (at least the 16 bit) unicode characters and print them on the screen with C.
I know there are related questions on SO but they don't solve the problem with printf in C, but this is what I want to achieve, if it's possible after all. I think it should be possible maybe with a trick I'm not aware of.
Since I want to use printf, I thought about something like this:
for (int i = 0x0000; i <= 0xffff; i++) {
//then somehow increment the string
char str[] = "\u25A1\n";
printf("%s", str);
char str[] = "\u25A2\n";
printf("%s", str);
char str[] = "\u25A3\n";
printf("%s", str);
...
}
But it's a bit of a problem to increment the unicode code point, here \u25A1. I'm aware it's not possible per se because some characters like \u0000 are not printable and the compiler says no. But apart from that, how could I increment from hexadecimal 0000 to ffff and print the character with printf.
If the __STDC_ISO_10646__ macro is defined, wide characters correspond to Unicode codepoints. So, assuming a locale that can represent the characters you are interested in, you can just printf() wide characters via the %lc format conversion:
#include <stdio.h>
#include <locale.h>
#ifndef __STDC_ISO_10646__
#error "Oops, our wide chars are not Unicode codepoints, sorry!"
#endif
/* Print code points 0..0xFFFE and their characters via %lc, which converts
* a wide character to the locale's multibyte encoding on output. Requires
* __STDC_ISO_10646__ (checked above) so wchar_t values are Unicode.
* NOTE(review): the loop uses < so U+FFFF itself is excluded, and the
* surrogate range U+D800..U+DFFF is not skipped — %lc conversion may fail
* for those values. Passing an int where %lc expects wint_t relies on the
* common case that wint_t is int — TODO confirm for the target platform. */
int main()
{
int i;
/* Adopt the user's locale so %lc emits the right byte sequence. */
setlocale(LC_ALL, "");
for (i = 0; i < 0xffff; i++) {
printf("%x - %lc\n", i, i);
}
return 0;
}
In C99, you can use wide character to multibyte character conversion functions wctomb() or wcrtomb() to convert each code point to a local representation, using the current character set. (The code points are in the current character set, not Unicode.) Remember to use setlocale() to ensure conversion functions are aware of the user locale (most importantly, the current character set used). The conversion functions use the LC_CTYPE category, but you should still use setlocale(LC_ALL, ""); as for any other locale-aware program.
(Not all systems have the C.UTF-8 locale installed, so I do not recommend trying to override the locale to the standard C with UTF-8 using setlocale(LC_ALL, "C.UTF-8");. It works on some systems, but not all. AFAIK it does not work in Fedora-based Linux distributions, for example.)
Because you want to output all Unicode code points, I suggest a different approach: Use one of the Universal Character Set Transformation Formats, i.e. UTF-8, UTF-16 (UCS-2 was superseded by UTF-16 in 1996), or UTF-32 (also known as UCS-4). UTF-8 is the one most often used on the Web -- in particular, on this very web page you're looking at right now -- and is very easy to use.
For further reading on why you should prefer UTF-8 over "native wide strings", see utf8everywhere.org.
If you want truly portable code, you can use this header file, utf8.h, to convert UTF-8 to unicode code points (utf8_to_code()) and Unicode code points to UTF-8 (code_to_utf8()):
#ifndef UTF8_H
#define UTF8_H
#include <stdlib.h>
#include <errno.h>
#define UTF8_MAXLEN 6
/* Decode the first UTF-8 sequence in buffer into a code point.
 *
 * buffer  - NUL-terminated UTF-8 data (up to 6 bytes may be read).
 * codeptr - if non-NULL, receives the decoded code point.
 *
 * Returns the number of bytes consumed (1..6), or 0 with errno set:
 * EINVAL for a NULL buffer, 0 for the end-of-string NUL, EILSEQ for an
 * invalid sequence.
 *
 * BUG FIX: the original multibyte branches returned the decoded code point
 * itself as the size_t result instead of storing it through codeptr and
 * returning the sequence length (the ASCII branch shows the intended
 * contract). NOTE(review): overlong encodings are still not rejected.
 */
static size_t utf8_to_code(const unsigned char *const buffer, unsigned int *const codeptr)
{
    if (!buffer) {
        errno = EINVAL;
        return 0;
    }
    if (*buffer == 0U) {
        errno = 0;
        return 0;
    }
    if (*buffer < 128U) {
        if (codeptr)
            *codeptr = buffer[0];
        return 1;
    }
    if (*buffer < 192U) {
        /* Stray continuation byte cannot start a sequence. */
        errno = EILSEQ;
        return 0;
    }
    if (*buffer < 224U) {
        if (buffer[1] >= 128U && buffer[1] < 192U) {
            if (codeptr)
                *codeptr = ((buffer[0] - 192U) << 6U)
                         |  (buffer[1] - 128U);
            return 2;
        }
        errno = EILSEQ;
        return 0;
    }
    if (*buffer < 240U) {
        if (buffer[1] >= 128U && buffer[1] < 192U &&
            buffer[2] >= 128U && buffer[2] < 192U) {
            if (codeptr)
                *codeptr = ((buffer[0] - 224U) << 12U)
                         | ((buffer[1] - 128U) << 6U)
                         |  (buffer[2] - 128U);
            return 3;
        }
        errno = EILSEQ;
        return 0;
    }
    if (*buffer < 248U) {
        if (buffer[1] >= 128U && buffer[1] < 192U &&
            buffer[2] >= 128U && buffer[2] < 192U &&
            buffer[3] >= 128U && buffer[3] < 192U) {
            if (codeptr)
                *codeptr = ((buffer[0] - 240U) << 18U)
                         | ((buffer[1] - 128U) << 12U)
                         | ((buffer[2] - 128U) << 6U)
                         |  (buffer[3] - 128U);
            return 4;
        }
        errno = EILSEQ;
        return 0;
    }
    if (*buffer < 252U) {
        /* Obsolete 5-byte form, kept for round-tripping with code_to_utf8(). */
        if (buffer[1] >= 128U && buffer[1] < 192U &&
            buffer[2] >= 128U && buffer[2] < 192U &&
            buffer[3] >= 128U && buffer[3] < 192U &&
            buffer[4] >= 128U && buffer[4] < 192U) {
            if (codeptr)
                *codeptr = ((buffer[0] - 248U) << 24U)
                         | ((buffer[1] - 128U) << 18U)
                         | ((buffer[2] - 128U) << 12U)
                         | ((buffer[3] - 128U) << 6U)
                         |  (buffer[4] - 128U);
            return 5;
        }
        errno = EILSEQ;
        return 0;
    }
    if (*buffer < 254U) {
        /* Obsolete 6-byte form. */
        if (buffer[1] >= 128U && buffer[1] < 192U &&
            buffer[2] >= 128U && buffer[2] < 192U &&
            buffer[3] >= 128U && buffer[3] < 192U &&
            buffer[4] >= 128U && buffer[4] < 192U &&
            buffer[5] >= 128U && buffer[5] < 192U) {
            if (codeptr)
                *codeptr = ((buffer[0] - 252U) << 30U)
                         | ((buffer[1] - 128U) << 24U)
                         | ((buffer[2] - 128U) << 18U)
                         | ((buffer[3] - 128U) << 12U)
                         | ((buffer[4] - 128U) << 6U)
                         |  (buffer[5] - 128U);
            return 6;
        }
        errno = EILSEQ;
        return 0;
    }
    /* 0xFE and 0xFF never appear in UTF-8. */
    errno = EILSEQ;
    return 0;
}
/* Encode one code point (0..0x7FFFFFFF) as UTF-8 into buffer.
 * Returns the number of bytes written (1..6), or 0 with errno = EINVAL
 * for values above 0x7FFFFFFF. The obsolete 5- and 6-byte forms are
 * produced for values beyond U+10FFFF, matching utf8_to_code(). */
static size_t code_to_utf8(unsigned char *const buffer, const unsigned int code)
{
    size_t len;
    unsigned int lead;
    unsigned int rest;
    size_t i;

    /* ASCII is its own encoding. */
    if (code < 0x80U) {
        buffer[0] = (unsigned char)code;
        return 1;
    }

    /* Pick sequence length and lead-byte marker from the value range. */
    if (code < 0x800U) {
        len = 2; lead = 0xC0U;
    } else if (code < 0x10000U) {
        len = 3; lead = 0xE0U;
    } else if (code < 0x200000U) {
        len = 4; lead = 0xF0U;
    } else if (code < 0x4000000U) {
        len = 5; lead = 0xF8U;
    } else if (code <= 0x7FFFFFFFU) {
        len = 6; lead = 0xFCU;
    } else {
        errno = EINVAL;
        return 0;
    }

    /* Fill continuation bytes from the end, 6 payload bits at a time. */
    rest = code;
    for (i = len - 1; i > 0; i--) {
        buffer[i] = (unsigned char)(0x80U | (rest & 0x3FU));
        rest >>= 6U;
    }
    buffer[0] = (unsigned char)(lead | rest);
    return len;
}
#endif /* UTF8_H */
It is not fast, but it should be easy to understand, and supports all possible Unicode code points (U+0000 to U+10FFFF, inclusive), on all systems with at least 32-bit unsigned ints. On systems with 16-bit unsigned ints, your compiler may warn about unreachable code, and it'll only support the first 65536 code points (U+0000 to U+FFFF).
Using above utf8.h, you can easily write a C program that outputs a HTML page containing the Unicode characters you want (excluding control characters U+0000-U+001F and U+007F-U+00BF, inclusive, and invalid code points U+D800-U+DFFF, inclusive). For example, page.c:
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include "utf8.h"
/* Emit a standalone HTML5 page listing Unicode code points U+0000..U+FFFF
 * (minus control characters and the surrogate range), one row per
 * character, UTF-8 encoded. Output goes to stdout: redirect to a file and
 * open it in a browser.
 *
 * FIX: the HTML character-entity strings below had been rendered by the
 * transcription (e.g. `str = """;` — not even valid C, it was
 * `"&quot;"`); the entities for space, quote, ampersand, less-than and
 * greater-than are restored. */
int main(void)
{
    unsigned char ch[UTF8_MAXLEN + 1];
    unsigned int i;
    const char *str;
    size_t n, len;
    /* HTML5 DOCTYPE */
    printf("<!DOCTYPE html>\n");
    printf("<html>\n");
    /* Header part. */
    printf(" <head>\n");
    printf(" <title> Unicode character list </title>\n");
    printf(" <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">\n");
    printf(" <style type=\"text/css\">\n");
    /* with internal CSS stylesheet: */
    printf(" html {\n");
    printf(" font-family: \"DejaVu Mono\", \"Courier New\", \"Courier\", monospace;\n");
    printf(" font-weight: normal;\n");
    printf(" font-size: 100%%;\n");
    printf(" text-decoration: none;\n");
    printf(" background: #f7f7f7;\n");
    printf(" color: #000000;\n");
    printf(" padding: 0 0 0 0;\n");
    printf(" border: 0 none;\n");
    printf(" margin: 0 0 0 0\n");
    printf(" }\n");
    printf(" body {\n");
    printf(" background: #ffffff;\n");
    printf(" padding: 0.5em 1em 0.5em 1em;\n");
    printf(" border: 1px solid #cccccc;\n");
    printf(" margin: 0 auto auto auto;\n");
    printf(" width: 12em;\n");
    printf(" text-align: center;\n");
    printf(" }\n");
    printf(" p {\n");
    printf(" padding: 0 0 0 0;\n");
    printf(" border: 0 none;\n");
    printf(" margin: 0 0 0 0;\n");
    printf(" outline: 0 none;\n");
    printf(" text-align: center;\n");
    printf(" }\n");
    printf(" p.odd {\n");
    printf(" background: #efefef;\n");
    printf(" }\n");
    printf(" p.even {\n");
    printf(" background: #f7f7f7;\n");
    printf(" }\n");
    printf(" span.code {\n");
    printf(" width: 8em;\n");
    printf(" text-align: right;\n");
    printf(" }\n");
    printf(" span.char {\n");
    printf(" width: 4em;\n");
    printf(" text-align: left;\n");
    printf(" }\n");
    printf(" </style>\n");
    printf(" </head>\n");
    /* Body part. */
    printf(" <body>\n");
    n = 0;
    for (i = 0U; i <= 0xFFFFU; i++) {
        /* Skip Unicode control characters. */
        if ((i >= 0U && i <= 31U) ||
            (i >= 127U && i <= 159U))
            continue;
        /* Skip invalid Unicode code points (UTF-16 surrogates). */
        if (i >= 0xD800U && i <= 0xDFFFU)
            continue;
        len = code_to_utf8(ch, i);
        if (len > 0) {
            ch[len] = '\0';
            /* HTML does not like " & < > (and a bare space would collapse),
             * so emit the corresponding character entities instead. */
            if (i == 32U)
                str = "&nbsp;";
            else
            if (i == 34U)
                str = "&quot;";
            else
            if (i == 38U)
                str = "&amp;";
            else
            if (i == 60U)
                str = "&lt;";
            else
            if (i == 62U)
                str = "&gt;";
            else
                str = (const char *)ch;
            /* Alternate row classes for zebra striping. */
            if (n & 1) {
                printf(" <p class=\"odd\" title=\"%u in decimal, &#%u; = %s\">", i, i, str);
                printf("<span class=\"code\">U+%04X</span>", i);
                printf(" <span class=\"char\">%s</span>", str);
                printf("</p>\n");
            } else {
                printf(" <p class=\"even\" title=\"%u in decimal, &#%u; = %s\">", i, i, str);
                printf("<span class=\"code\">U+%04X</span>", i);
                printf(" <span class=\"char\">%s</span>", str);
                printf("</p>\n");
            }
            n++;
        }
    }
    printf(" </body>\n");
    printf("</html>\n");
    return EXIT_SUCCESS;
}
Redirect the output to a file, and you can open the file in whatever browser you prefer. If your browser is sane, and does not treat local files any different to those it obtains from a web server, then you should see the correct output.
(If you see multiple characters per code point after U+00A0, your browser has decided that because the file is local, it is using a different character set that it explicitly states it uses. Switch to a sane browser if that happens, or override the character set selection.)
If you want, you can just print the codes out as UTF-8 text, say using text.c:
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include "utf8.h"
/* Dump code points U+0000..U+FFFF (minus controls and surrogates) as plain
* UTF-8 text, one "U+XXXX <char>" line per code point. The character is
* padded with spaces because combining characters may otherwise not render
* on their own. Requires a UTF-8-capable terminal or viewer. */
int main(void)
{
unsigned char ch[UTF8_MAXLEN + 1];
unsigned int i;
size_t len;
for (i = 0U; i <= 0xFFFFU; i++) {
/* Skip Unicode control characters. */
if ((i >= 0U && i <= 31U) ||
(i >= 127U && i <= 159U))
continue;
/* Skip invalid Unicode code points. */
if (i >= 0xD800U && i <= 0xDFFFU)
continue;
len = code_to_utf8(ch, i);
if (len > 0) {
ch[len] = '\0'; /* code_to_utf8() does not NUL-terminate */
printf("U+%04X %s \n", i, ch);
}
}
return EXIT_SUCCESS;
}
but then you must either be sure your terminal or terminal emulator supports UTF-8 and uses an UTF-8 locale, or you redirect the output to a text file and open that file in an editor which either assumes the file uses UTF-8 or lets you explicitly select the UTF-8 character set.
Note that there is a space before and after each character. Because some of the code points are combining characters, they may not show up at all unless they can be combined with another character, and most (all?) combine with space just fine.
If you use Windows, then you must conform to Microsoft stupidity, and add a special "byte order mark" -- printf("\xEF\xBB\xBF"); -- to the beginning of the output, so that its utilities like Notepad recognizes the file as UTF-8. It's a Windows-only wart, and treat it as such.
Questions?
The function to convert a 16-bit Unicode codepoint to a multibyte character sequence is c16rtomb; there is also c32rtomb if you want to handle 32-bit codepoints:
#include <uchar.h>
/* Fragment: convert one UTF-16 code unit (a variable `i`, not shown here)
* to the locale's multibyte encoding with c16rtomb().
* NOTE(review): `ps` is used without being zero-initialized — it should be
* `mbstate_t ps = {0};` before the first call. */
mbstate_t ps;
/* NOTE(review): MB_CUR_MAX is not a compile-time constant, so this is a
* variable-length array. */
char buf[MB_CUR_MAX];
size_t bytes = c16rtomb(buf, i, &ps);
if (bytes != (size_t) -1) {
/* NOTE(review): the * precision of %.*s expects an int; `bytes` is
* size_t and should strictly be passed as (int)bytes. */
printf("%.*s\n", bytes, buf);
}
If c16rtomb is not available you will need to use platform-specific facilities.
I would go for something like this (using raw UTF-8 encoding):
/* NOTE(review): illustrative only -- as the EDIT below admits, this walk
 * produces many invalid UTF-8 byte sequences (incrementing raw bytes is
 * not the same as incrementing code points). */
char unicode[3] = { 0x00, 0x00, 0x00 };
for(size_t i=0; i<0xffff; i++)
{
printf("%s\n", unicode);
/* WARNING(review): char* -> uint16_t* violates strict aliasing, requires
 * a cast to even compile (as the answer notes), and the alignment of
 * unicode[] is not guaranteed to suit uint16_t. */
uint16_t * code = &unicode[0];
*code = *code +1;
}
Define a string on 3 bytes, the last one is the NULL terminating byte allowing a display via printf
Consider the two first bytes as your 16-bit unicode and increment it on each loop
Of course it can be optimized as:
Many characters won't be displayable
The cast char* -> uint16_t is not very elegant (triggers a warning)
As there is 2 bytes for UTF-8 encoding it will actually browse 11 bits of codepoints. To get the 16 bits you might want to actually use uint32_t and define a 5 bytes char* buffer
[EDIT] As stated in the comment, this loop will actually generate a lot of invalid UTF-8 sequences.
Indeed, going from U+007F to U+0080 is a +1 for code points but in UTF-8 you jump from 0x7F to 0xC280: you need to exclude some ranges in the loop.

Mapping multibyte characters to their unicode point representation

How do you map a single UTF-8 character to its unicode point in C?
[For example, È would be mapped to 00c8].
If your platform's wchar_t stores unicode (if it's a 32-bit type, it probably does) and you have an UTF-8 locale, you can call mbrtowc (from C90.1).
mbstate_t state = {0};
wchar_t wch;
char s[] = "\303\210";
size_t n;
memset(&state, 0, sizeof(state));
setlocale(LC_CTYPE, "en_US.utf8"); /*error checking omitted*/
n = mbrtowc(&wch, s, strlen(s), &state);
/* (size_t)-1 signals an encoding error and (size_t)-2 an incomplete
 * sequence; wch is only stored for smaller returns, so both error
 * values must be excluded (the original "n <= (size_t)-2" accepted
 * the incomplete case and printed an indeterminate wch). */
if (n != (size_t)-1 && n != (size_t)-2) printf("%lx\n", (unsigned long)wch);
For more flexibility, you can call the iconv interface.
char s[] = "\303\210";
/* iconv_open takes (tocode, fromcode): we convert FROM UTF-8 TO UCS-4.
 * (The original had the two arguments reversed.) */
iconv_t cd = iconv_open("UCS-4", "UTF-8");
if (cd != (iconv_t) -1) {   /* the failure sentinel is (iconv_t)-1, not int -1 */
    char *inp = s;
    size_t ins = strlen(s);
    uint32_t c = 0;
    char *outp = (char *) &c;   /* iconv expects char** even for wide output */
    size_t outs = sizeof c;     /* was 0, which guarantees an E2BIG failure */
    if (iconv(cd, &inp, &ins, &outp, &outs) != (size_t) -1)
        /* NOTE: "UCS-4" output is big-endian; on a little-endian host use
         * "UCS-4LE" (or byte-swap c) before relying on the printed value. */
        printf("%lx\n", (unsigned long) c);
    iconv_close(cd);
}
Some things to look at :
libiconv
ConvertUTF.h
MultiByteToWideChar (under windows)
A reasonably fast implementation of a UTF-8 to UCS-2 converter. Surrogates and characters outside the BMP are left as an exercise.
The function returns the number of bytes consumed from the input s string. A negative value represents an error.
The resulting unicode character is put at the address p points to.
/* Decode the first UTF-8 sequence of s (1-3 bytes, i.e. UCS-2 range)
 * into *p. Returns the number of bytes consumed, 0 for an empty
 * string, or a negative value on malformed input. *p is zeroed first. */
int utf8_to_wchar(wchar_t *p, const char *s)
{
    const unsigned char *b = (const unsigned char *)s;

    *p = 0;
    if (b[0] == 0)
        return 0;                       /* empty string: nothing consumed */

    if (b[0] < 0x80) {                  /* single byte: plain ASCII */
        *p = b[0];
        return 1;
    }

    if ((b[0] & 0xE0) == 0xC0) {        /* two-byte lead (110xxxxx) */
        if ((b[1] & 0xC0) != 0x80)
            return -1;                  /* missing continuation byte */
        *p = (wchar_t)(((b[0] & 0x1F) << 6) | (b[1] & 0x3F));
#ifdef DETECT_OVERLONG
        if (*p < 0x80) return -2;       /* overlong two-byte encoding */
#endif
        return 2;
    }

    if ((b[0] & 0xF0) == 0xE0) {        /* three-byte lead (1110xxxx) */
        /* || short-circuits, so b[2] is only read when b[1] is a
         * valid continuation (the NUL terminator stops us safely). */
        if ((b[1] & 0xC0) != 0x80 || (b[2] & 0xC0) != 0x80)
            return -1;
        *p = (wchar_t)(((b[0] & 0x0F) << 12) | ((b[1] & 0x3F) << 6) | (b[2] & 0x3F));
#ifdef DETECT_OVERLONG
        if (*p < 0x800) return -2;      /* overlong three-byte encoding */
#endif
        return 3;
    }

    return -1;                          /* invalid or unsupported lead byte */
}

What is a better method for packing 4 bytes into 3 than this?

I have an array of values all well within the range 0 - 63, and decided I could pack every 4 bytes into 3 because the values only require 6 bits and I could use the extra 2bits to store the first 2 bits of the next value and so on.
Having never done this before I used the switch statement and a nextbit variable (a state machine like device) to do the packing and keep track of the starting bit. I'm convinced however, there must be a better way.
Suggestions/clues please, but don't ruin my fun ;-)
Any portability problems regarding big/little endian?
btw: I have verified this code is working, by unpacking it again and comparing with the input. And no it ain't homework, just an exercise I've set myself.
/* build with gcc -std=c99 -Wconversion */
/* NOTE(review): this is the question's own code, kept verbatim; the
 * answers below critique it. Known problems are flagged inline. */
#define ASZ 400
typedef unsigned char uc_;
uc_ data[ASZ];
int i;
/* Fill with the repeating 6-bit pattern 0..63. */
for (i = 0; i < ASZ; ++i) {
data[i] = (uc_)(i % 0x40);
}
size_t dl = sizeof(data);
/* BUG(review): "%z" is an incomplete conversion specifier; size_t
 * needs "%zu" (see the critique further down). */
printf("sizeof(data):%z\n",dl);
/* Float round-up of dl*3/4; integer math such as (dl * 3 + 3) / 4
 * avoids float precision issues (again, see the critique below). */
float fpl = ((float)dl / 4.0f) * 3.0f;
size_t pl = (size_t)(fpl > (float)((int)fpl) ? fpl + 1 : fpl);
printf("length of packed data:%z\n",pl);
for (i = 0; i < dl; ++i)
printf("%02d ", data[i]);
printf("\n");
uc_ * packeddata = calloc(pl, sizeof(uc_));
uc_ * byte = packeddata;
/* nextbit = 1-based bit position in *byte where the next 6-bit value
 * starts; the switch only ever sees 1, 3, 5 or 7. */
uc_ nextbit = 1;
for (int i = 0; i < dl; ++i) {
uc_ m = (uc_)(data[i] & 0x3f);
switch(nextbit) {
case 1:
/* all 6 bits of m into first 6 bits of byte: */
*byte = m;
nextbit = 7;
break;
case 3:
/* all 6 bits of m into last 6 bits of byte: */
/* UB(review): "*byte++ = (uc_)(*byte | ...)" reads byte on the right
 * while modifying it on the left with no sequencing -- undefined
 * behavior; write the OR and the increment as two statements. */
*byte++ = (uc_)(*byte | (m << 2));
nextbit = 1;
break;
case 5:
/* 1st 4 bits of m into last 4 bits of byte: */
/* UB(review): same unsequenced read/modify of byte as above. */
*byte++ = (uc_)(*byte | ((m & 0x0f) << 4));
/* 5th and 6th bits of m into 1st and 2nd bits of byte: */
*byte = (uc_)(*byte | ((m & 0x30) >> 4));
nextbit = 3;
break;
case 7:
/* 1st 2 bits of m into last 2 bits of byte: */
/* UB(review): same unsequenced read/modify of byte as above. */
*byte++ = (uc_)(*byte | ((m & 0x03) << 6));
/* next (last) 4 bits of m into 1st 4 bits of byte: */
*byte = (uc_)((m & 0x3c) >> 2);
nextbit = 5;
break;
}
}
So, this is kinda like code-golf, right?
#include <stdlib.h>
#include <string.h>
/* Pack four 6-bit values (n[0..3]) into the three bytes at r,
 * least-significant value first. */
static void pack2(unsigned char *r, unsigned char *n) {
    unsigned combined = 0;
    int k;

    /* Build the 24-bit word: n[0] in bits 0-5, n[1] in 6-11, ... */
    for (k = 3; k >= 0; k--)
        combined = (combined << 6) | n[k];

    /* Emit it little-endian, one byte at a time. */
    for (k = 0; k < 3; k++) {
        r[k] = (unsigned char)(combined & 0xFFu);
        combined >>= 8;
    }
}
/* Pack an array of 0..63-valued bytes, four input bytes into three
 * output bytes. Returns a malloc'd buffer (caller frees) of at least
 * ceil(len/4)*3 bytes, or NULL on allocation failure. */
unsigned char *apack(const unsigned char *s, int len) {
    /* const-correct: the original dropped the const qualifier here */
    const unsigned char *s_end = s + len;
    unsigned char *r, *result = malloc(len / 4 * 3 + 3);
    unsigned char lastones[4] = { 0 };

    if (result == NULL)
        return NULL;
    for (r = result; s + 4 <= s_end; s += 4, r += 3)
        pack2(r, (unsigned char *)s);   /* pack2 only reads; cast is safe */
    /* Pack the zero-padded tail only when 1-3 bytes remain; the
     * original called pack2 unconditionally, writing 3 junk bytes
     * even for exact multiples of 4. */
    if (s < s_end) {
        memcpy(lastones, s, (size_t)(s_end - s));
        pack2(r, lastones);
    }
    return result;
}
Check out the IETF RFC 4648 for 'The Base16, Base32 and Base64 Data Encodings'.
Partial code critique:
size_t dl = sizeof(data);
printf("sizeof(data):%d\n",dl);
float fpl = ((float)dl / 4.0f) * 3.0f;
size_t pl = (size_t)(fpl > (float)((int)fpl) ? fpl + 1 : fpl);
printf("length of packed data:%d\n",pl);
Don't use the floating point stuff - just use integers. And use '%z' to print 'size_t' values - assuming you've got a C99 library.
size_t pl = ((dl + 3) / 4) * 3;
I think your loop could be simplified by dealing with 3-byte input units until you've got a partial unit left over, and then dealing with a remainder of 1 or 2 bytes as special cases. I note that the standard referenced says that you use one or two '=' signs to pad at the end.
I have a Base64 encoder and decode which does some of that. You are describing the 'decode' part of Base64 -- where the Base64 code has 4 bytes of data that should be stored in just 3 - as your packing code. The Base64 encoder corresponds to the unpacker you will need.
Base-64 Decoder
Note: base_64_inv is an array of 256 values, one for each possible input byte value; it defines the correct decoded value for each encoded byte. In the Base64 encoding, this is a sparse array - 3/4 zeroes. Similarly, base_64_map is the mapping between a value 0..63 and the corresponding storage value.
enum { DC_PAD = -1, DC_ERR = -2 };
/* Translate one encoded byte: returns its 6-bit value, DC_PAD for the
 * pad character, or DC_ERR for bytes outside the Base-64 alphabet. */
static int decode_b64(int c)
{
    if (c == base64_pad)
        return DC_PAD;
    int value = base_64_inv[c];
    /* base_64_inv holds 0 for bytes not in the alphabet, so a zero
     * result is genuine only when c is the character encoding 0. */
    return (value == 0 && c != base_64_map[0]) ? DC_ERR : value;
}
/* Decode 4 Base-64 bytes into 1..3 binary bytes.
 * Returns the number of bytes stored in bin_data (fewer than 3 when the
 * quad ends in '=' padding), or a negative B64_ERR_* code on malformed
 * input. bin_data must have room for 3 bytes. */
static int decode_quad(const char *b64_data, char *bin_data)
{
int b0 = decode_b64(b64_data[0]);
int b1 = decode_b64(b64_data[1]);
int b2 = decode_b64(b64_data[2]);
int b3 = decode_b64(b64_data[3]);
int bytes;
/* First two bytes must be real data; padding may only appear at the
 * end, so a padded 3rd byte forces a padded 4th. */
if (b0 < 0 || b1 < 0 || b2 == DC_ERR || b3 == DC_ERR || (b2 == DC_PAD && b3 != DC_PAD))
return(B64_ERR_INVALID_ENCODED_DATA);
if (b2 == DC_PAD && (b1 & 0x0F) != 0)
/* 3rd byte is '='; 2nd byte must end with 4 zero bits */
return(B64_ERR_INVALID_TRAILING_BYTE);
if (b2 >= 0 && b3 == DC_PAD && (b2 & 0x03) != 0)
/* 4th byte is '='; 3rd byte is not '=' and must end with 2 zero bits */
return(B64_ERR_INVALID_TRAILING_BYTE);
/* Recombine 6-bit groups: 6+2, 4+4, 2+6 bits per output byte. */
bin_data[0] = (b0 << 2) | (b1 >> 4);
bytes = 1;
if (b2 >= 0)
{
bin_data[1] = ((b1 & 0x0F) << 4) | (b2 >> 2);
bytes = 2;
}
if (b3 >= 0)
{
bin_data[2] = ((b2 & 0x03) << 6) | (b3);
bytes = 3;
}
return(bytes);
}
/* Decode input Base-64 string to original data. Output length returned, or negative error.
 * datalen must be a multiple of 4; buffer must hold at least
 * BASE64_DECLENGTH(datalen) bytes. Decoding stops (and the error is
 * returned) at the first malformed quad. */
int base64_decode(const char *data, size_t datalen, char *buffer, size_t buflen)
{
size_t outlen = 0;
if (datalen % 4 != 0)
return(B64_ERR_INVALID_ENCODED_LENGTH);
if (BASE64_DECLENGTH(datalen) > buflen)
return(B64_ERR_OUTPUT_BUFFER_TOO_SMALL);
/* Consume one 4-character quad at a time; each yields 1..3 bytes. */
while (datalen >= 4)
{
int nbytes = decode_quad(data, buffer + outlen);
if (nbytes < 0)
return(nbytes);
outlen += nbytes;
data += 4;
datalen -= 4;
}
assert(datalen == 0); /* By virtue of the %4 check earlier */
/* NOTE(review): outlen (size_t) is narrowed to int here; fine for sane
 * buffer sizes but would truncate for outputs larger than INT_MAX. */
return(outlen);
}
Base-64 Encoder
/* Encode 3 data bytes into 4 Base-64 characters (no padding needed). */
static void encode_triplet(const char *triplet, char *quad)
{
    int hi = triplet[0];
    int mid = triplet[1];
    int lo = triplet[2];

    quad[0] = base_64_map[(hi >> 2) & 0x3F];
    quad[1] = base_64_map[((hi & 0x03) << 4) | ((mid >> 4) & 0x0F)];
    quad[2] = base_64_map[((mid & 0x0F) << 2) | ((lo >> 6) & 0x03)];
    quad[3] = base_64_map[lo & 0x3F];
}
/* Encode 2 data bytes into 3 Base-64 characters plus one pad byte. */
static void encode_doublet(const char *doublet, char *quad, char pad)
{
    int hi = doublet[0];
    int lo = doublet[1];

    quad[0] = base_64_map[(hi >> 2) & 0x3F];
    quad[1] = base_64_map[((hi & 0x03) << 4) | ((lo >> 4) & 0x0F)];
    quad[2] = base_64_map[(lo & 0x0F) << 2];
    quad[3] = pad;
}
/* Encode 1 data byte into 2 Base-64 characters plus two pad bytes. */
static void encode_singlet(const char *singlet, char *quad, char pad)
{
    int hi = singlet[0];

    quad[0] = base_64_map[(hi >> 2) & 0x3F];
    quad[1] = base_64_map[(hi & 0x03) << 4];
    quad[2] = pad;
    quad[3] = pad;
}
/* Encode input data as Base-64 string. Output length returned, or negative error.
 * pad is the padding character ('=' for standard Base-64, '\0' for the
 * unpadded variant). buffer receives a NUL-terminated string and must
 * hold at least BASE64_ENCLENGTH(datalen) bytes. */
static int base64_encode_internal(const char *data, size_t datalen, char *buffer, size_t buflen, char pad)
{
    size_t outlen = BASE64_ENCLENGTH(datalen);
    const char *bin_data = (const void *)data;
    char *b64_data = (void *)buffer;

    if (outlen > buflen)
        return(B64_ERR_OUTPUT_BUFFER_TOO_SMALL);
    /* Whole triplets first: each becomes 4 output characters. */
    while (datalen >= 3)
    {
        encode_triplet(bin_data, b64_data);
        bin_data += 3;
        b64_data += 4;
        datalen -= 3;
    }
    b64_data[0] = '\0';
    /* Encode the 1- or 2-byte tail, if any. The terminator at [4] is
     * written only when a tail quad exists; the original wrote it
     * unconditionally, storing past the output for multiple-of-3
     * input lengths. */
    if (datalen > 0)
    {
        if (datalen == 2)
            encode_doublet(bin_data, b64_data, pad);
        else
            encode_singlet(bin_data, b64_data, pad);
        b64_data[4] = '\0';
    }
    return((b64_data - buffer) + strlen(b64_data));
}
I complicate life by having to deal with a product that uses a variant alphabet for the Base64 encoding, and also manages not to pad data - hence the 'pad' argument (which can be zero for 'null padding' or '=' for standard padding. The 'base_64_map' array contains the alphabet to use for 6-bit values in the range 0..63.
Another simpler way to do it would be to use bit fields. One of the lesser known corners of C struct syntax is the bit field. Let's say you have the following structure:
/* Four 6-bit fields intended to occupy 3 bytes total.
 * NOTE(review): "byte" must be typedef'd elsewhere; bit-field types
 * other than int/unsigned int are implementation-defined, and field
 * packing/ordering is implementation-defined too, so
 * sizeof(struct packed_bytes) == 3 is NOT guaranteed by the standard. */
struct packed_bytes {
byte chunk1 : 6;
byte chunk2 : 6;
byte chunk3 : 6;
byte chunk4 : 6;
};
This declares chunk1, chunk2, chunk3, and chunk4 to have the type byte but to only take up 6 bits in the structure. The result is that sizeof(struct packed_bytes) == 3. Now all you need is a little function to take your array and dump it into the structure like so:
/* Copy `count` 6-bit values from `in` into an array of packed
 * bit-field structs; each struct packed_bytes holds four values.
 * `out` must have room for ceil(count/4) structs. */
void
dump_to_struct(byte *in, struct packed_bytes *out, int count)
{
    int i;   /* unused `j` from the original removed */

    for (i = 0; i < (count / 4); ++i) {
        out[i].chunk1 = in[i * 4];
        out[i].chunk2 = in[i * 4 + 1];
        out[i].chunk3 = in[i * 4 + 2];
        out[i].chunk4 = in[i * 4 + 3];
    }
    /* Finish up the 1-3 leftover values. The original read
     * "switch(struct % 4)", which does not compile; the operand
     * must be `count`. Cases intentionally fall through. */
    switch (count % 4) {
    case 3:
        out[count / 4].chunk3 = in[(count / 4) * 4 + 2];
        /* fallthrough */
    case 2:
        out[count / 4].chunk2 = in[(count / 4) * 4 + 1];
        /* fallthrough */
    case 1:
        out[count / 4].chunk1 = in[(count / 4) * 4];
    }
}
There you go, you now have an array of struct packed_bytes that you can easily read by using the above struct.
Instead of using a statemachine you can simply use a counter for how many bits are already used in the current byte, from which you can directly derive the shift-offsets and whether or not you overflow into the next byte.
Regarding the endianess: As long as you use only a single datatype (that is you don't reinterpret pointer to types of different size (e.g. int* a =...;short* b=(short*) a;) you shouldn't get problems with endianess in most cases
Taking elements of DigitalRoss's compact code, Grizzly's suggestion, and my own code, I have written my own answer at last. Although DigitalRoss provides a usable working answer, my usage of it without understanding, would not have provided the same satisfaction as to learning something. For this reason I have chosen to base my answer on my original code.
I have also chosen to ignore the advice Jonathon Leffler gives to avoid using floating point arithmetic for the calculation of the packed data length. The recommended integer method — the same one DigitalRoss uses — can increase the length of the packed data by as much as three bytes. Granted this is not much, but it is avoidable by the use of floating point math.
Here is the code, criticisms welcome:
/* built with gcc -std=c99 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Pack 6-bit values (0..63), four input bytes into three output bytes.
 * On success returns a malloc'd buffer (caller frees) and stores its
 * exact length in *packedlen; returns NULL on allocation failure. */
unsigned char *
pack(const unsigned char * data, size_t len, size_t * packedlen)
{
    /* ceil(len * 3 / 4) in exact integer arithmetic. The original used
     * float math, which loses precision (and can undersize the buffer)
     * once len exceeds ~2^24; the integer form matches it exactly for
     * all smaller lengths and stays correct beyond. */
    *packedlen = (len * 3 + 3) / 4;
    unsigned char * packed = malloc(*packedlen);
    if (!packed)
        return 0;

    const unsigned char * in = data;
    const unsigned char * in_end = in + len;
    unsigned char * out;

    /* Whole groups of four: 4 x 6 bits -> 3 bytes. */
    for (out = packed; in + 4 <= in_end; in += 4) {
        *out++ = in[0] | ((in[1] & 0x03) << 6);
        *out++ = ((in[1] & 0x3c) >> 2) | ((in[2] & 0x0f) << 4);
        *out++ = ((in[2] & 0x30) >> 4) | (in[3] << 2);
    }

    /* 0-3 leftover values. lastlen > 3 is impossible here, so the
     * original's dead in[3] branch has been dropped. */
    size_t lastlen = in_end - in;
    if (lastlen > 0) {
        *out = in[0];
        if (lastlen > 1) {
            *out++ |= ((in[1] & 0x03) << 6);
            *out = ((in[1] & 0x3c) >> 2);
            if (lastlen > 2) {
                *out++ |= ((in[2] & 0x0f) << 4);
                *out = ((in[2] & 0x30) >> 4);
            }
        }
    }
    return packed;
}
/* Demo driver: pack a sample array of 6-bit values and dump both the
 * unpacked and packed forms to stdout. */
int main()
{
    size_t i;
    unsigned char data[] = {
        12, 15, 40, 18,
        26, 32, 50, 3,
        7, 19, 46, 10,
        25, 37, 2, 39,
        60, 59, 0, 17,
        9, 29, 13, 54,
        5, 6, 47, 32
    };
    size_t datalen = sizeof(data);

    /* %zu is the conversion for size_t; the original's %td is for
     * ptrdiff_t and is undefined behavior here. */
    printf("unpacked datalen: %zu\nunpacked data\n", datalen);
    for (i = 0; i < datalen; ++i)
        printf("%02d ", data[i]);
    printf("\n");

    size_t packedlen;
    unsigned char * packed = pack(data, sizeof(data), &packedlen);
    if (!packed) {
        fprintf(stderr, "Packing failed!\n");
        return EXIT_FAILURE;
    }
    printf("packedlen: %zu\npacked data\n", packedlen);
    for (i = 0; i < packedlen; ++i)
        printf("0x%02x ", packed[i]);
    printf("\n");
    free(packed);
    return EXIT_SUCCESS;
}

Resources