As part of a C program I wrote the following function, which is supposed to find the second-smallest element of an array:
unsigned int array_second_min (unsigned int w[], unsigned int n)
{
unsigned int i, erst = UINT_MAX, zweit = UINT_MAX, count = 0;
if (n < 2)
return UINT_MAX;
for (i = 0;i < n; i++) {
if (w[i] == w[i + 1])
count++;
}
if (count == n - 1)
return UINT_MAX;
for (i = 0;i < n;i++) {
if (w[i] < erst)
erst = w[i];
}
for (i = 0;i < n;i++) {
if (w[i] == erst)
continue;
if ((w[i] - erst) < zweit)
zweit = w[i];
}
return zweit;
}
The problem is that it is not really functioning as it should. I think the problem is in the last for loop, but I am not sure about that.
Thank you for your help.
The following code will return the second smallest element by sorting the array in ascending order with qsort from <stdlib.h> and taking w[1]:
static int compare_uint(const void *p, const void *q)
{
    unsigned int a = *(const unsigned int *)p;
    unsigned int b = *(const unsigned int *)q;
    return (a > b) - (a < b);
}

unsigned int array_second_min (unsigned int w[], unsigned int n){
    if(n < 2)
        return UINT_MAX;
    qsort(w, n, sizeof w[0], compare_uint); /* ascending order */
    return w[1];                            /* second smallest element */
}
This is a somewhat more efficient solution, as it is O(n):
struct pair {
int r[2];
};
struct pair small2(int *a, int n) {
    int r[2];
    int order;
    r[0] = a[0];
    r[1] = a[1];
    order = (r[0] >= r[1]);            /* r[order] is the smaller of the two, r[!order] the larger */
    for (int i = 2; i < n; i++) {
        if (a[i] <= r[order]) {        /* a[i] is a new minimum; the old minimum becomes the second */
            r[!order] = a[i];
            order = !order;
        } else if (a[i] <= r[!order]) {/* a[i] is a new second-smallest */
            r[!order] = a[i];
        }
    }
    struct pair x;
    x.r[0] = r[order];                 /* smallest */
    x.r[1] = r[!order];                /* second smallest */
    return x;
}
The question lacks detail about duplicates; this handles them in an unsurprising fashion (a duplicated minimum can occupy both slots). Note the trickiness of the order variable; this works because you are only interested in the least two. If you wanted the least three, you would have to add extra tests. It would remain O(n), but the constant factor would be greater.
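For completeness, a minimal usage sketch (the example values are mine, not from the post), assuming struct pair and small2 above are visible in the same translation unit:
#include <stdio.h>

int main(void)
{
    int a[] = { 7, 3, 9, 3, 5 };
    struct pair p = small2(a, 5);
    /* prints "min = 3, second = 3": the duplicated minimum fills both slots */
    printf("min = %d, second = %d\n", p.r[0], p.r[1]);
    return 0;
}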
I want to construct two big numbers as arrays in C and add them.
The following is my code:
void add(unsigned char* a, unsigned char* b, unsigned int len)
{
int i;
unsigned short T;
unsigned char carry = 0;
for (i = len - 1; i >= 0; --i)
{
T = (unsigned short)(a[i]) + (unsigned short)(b[i]) + (unsigned short)carry;
//T = a[i] + b[i] + carry;
if (T > 0xFF)
carry = 1;
else
carry = 0;
a[i] = (unsigned char)T;
}
}
The maximum value of every element in arrays a and b is 255.
EDIT1: The highest carry is discarded. The result is saved in array a.
EDIT2: replaced "Byte" with "carry".
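For illustration, a small self-contained sketch (the example values are arbitrary) of how two 2-byte big-endian numbers are passed to the add() above:
#include <stdio.h>

/* assumes add() from above is declared in this file */
int main(void)
{
    unsigned char a[2] = { 0x01, 0x02 };   /* big-endian 0x0102 = 258 */
    unsigned char b[2] = { 0x00, 0xFF };   /* big-endian 0x00FF = 255 */
    add(a, b, 2);                          /* result is stored back in a */
    printf("%02X %02X\n", a[0], a[1]);     /* prints "02 01", i.e. 0x0201 = 513 */
    return 0;
}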
The original code is :
Integer B1(B, SM3_BLOCK_SIZE);
++B1;
for (i = 0; i < ILen; i += v)
(Integer(I + i, v) + B1).Encode(I + i, v);
I wrote two new functions. One is the add() above; the other is the following:
void add_one(unsigned char *arr, unsigned int len)
{
int i;
for (i = len-1; i >= 0; --i)
{
arr[len] += 1;
if (arr[len] != 0)
return;
}
}
If my code is right, the original code is equivalent to the following:
add_one(B, SM3_BLOCK_SIZE);
for (i = 0; i < ILen; i += v)
add(I + i, B, SM3_BLOCK_SIZE);
There is (at least) one bug. Look at this code:
void add_one(unsigned char *arr, unsigned int len)
{
int i;
for (i = len-1; i >= 0; --i)
{
arr[len] += 1; // Indexing using len is wrong
if (arr[len] != 0) // Indexing using len is wrong
return;
}
}
You probably want to use i as the index.
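With that change, the corrected function would look roughly like this (same structure as the original, only the index fixed):
void add_one(unsigned char *arr, unsigned int len)
{
    int i;
    for (i = len - 1; i >= 0; --i)
    {
        arr[i] += 1;       /* index with i instead of len */
        if (arr[i] != 0)   /* no carry left, so stop */
            return;
    }
}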
I assume you know that you are implementing the add function for a big-endian positive integer.
Avoid using for (i = len-1; i >= 0; --i). It invites a runtime error when i is unsigned and len is 0, because len - 1 wraps around. Instead, use for (i = len; i-- > 0;).
If you need a little-endian integer, then use for (int i = 0; i < len; ++i).
char add(unsigned char* a, unsigned char* b, unsigned int len)
{
unsigned short carry = 0;
//for (int i = 0; i < len; ++i) // for little-endian
for (int i = len; i-- > 0;) // for big-endian
{
carry += a[i] + b[i];
a[i] = carry & 0xFF;
carry >>= 8;
}
return carry;
}
Tests
unsigned char a[5] = {255,2,3,4,5};
unsigned char b[5] = {255,256-2,256-3,4,5};
char overflow = add(a,b,5);
printf("%d %d %d %d %d / %d",a[0],a[1],a[2],a[3],a[4] , overflow);
Output
255 1 0 8 10 / 1
Can you please help rewrite this code? I am not able to understand how the bitwise left shift actually works. If there is an easier way to write this code, please let me know.
This code calculates the number of ones in the bit representation of a number.
int numberofones( int value, int count) {
int numchars = 8 * sizeof(int);
int n;
for(n = 0; n < numchars; n++)
{
if(value & (1 << (numchars - 1 - n))) {
count++;
}
}
return count;
}
int numberofones( int value) {
    int numchars = 8 * sizeof(int);   /* number of bits in an int */
    int n;
    int count = 0 ;
    for(n = 0; n < numchars; n++)
    {
        /* 1u << n is a mask with only bit n set; the & picks out that bit of value */
        if(value & (1u << n))
            count++;
    }
    return count;
}
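Since the question also asks for an easier way to write this, a common alternative is Kernighan's trick of repeatedly clearing the lowest set bit. This is only a sketch (not from the answer above), using an unsigned parameter to avoid signed-overflow issues:
int numberofones(unsigned int value)
{
    int count = 0;
    while (value) {
        value &= value - 1;   /* clears the lowest set bit */
        count++;
    }
    return count;
}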
I was trying to optimize the radix sort code because I felt there was room for improvement: the traditional implementations in books and on the web seem to be direct copies of one another, and they run slowly because they take an arbitrary number such as 10 for the modulo operation. I have optimized the code as far as I could; I may have missed some optimization techniques, and in that case please enlighten me.
Motivation for optimization:
http://codercorner.com/RadixSortRevisited.htm
http://stereopsis.com/radix.html
I was unable to implement all the optimizations in the articles; most of it was beyond my skill and understanding, and I lacked sufficient time. If you can, feel free to implement them.
EDIT 4
This Java version of radix sort calculates all the histograms in one read and does not need to refill array Z with zeros after every LSB sort. It keeps the usual ability to skip a sorting pass and jump to the next LSB when all the bytes at the previous position are the same. As usual, this is only for 32-bit integers, but a 64-bit version can be created from it.
protected static int[] DSC(int A[])// Sorts in descending order
{
int tmp[] = new int[A.length] ;
int Z[] = new int[1024] ;
int i, Jump, Jump2, Jump3, Jump4, swap[] ;
Jump = A[0] & 255 ;
Z[Jump] = 1 ;
Jump2 = ((A[0] >> 8) & 255) + 256 ;
Z[Jump2] = 1 ;
Jump3 = ((A[0] >> 16) & 255) + 512 ;
Z[Jump3] = 1 ;
Jump4 = (A[0] >> 24) + 768 ;
Z[Jump4] = 1 ;
// Histograms creation
for (i = 1 ; i < A.length; ++i)
{
++Z[A[i] & 255] ;
++Z[((A[i] >> 8) & 255) + 256] ;
++Z[((A[i] >> 16) & 255) + 512] ;
++Z[(A[i] >> 24) + 768] ;
}
// 1st LSB Byte Sort
if( Z[Jump] != A.length )
{
Z[0] = A.length - Z[0];
for (i = 1; i < 256; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[A[i] & 255]++] = A[i];
}
swap = A ; A = tmp ; tmp = swap ;
}
// 2nd LSB Byte Sort
if( Z[Jump2] != A.length )
{
Z[256] = A.length - Z[256];
for (i = 257; i < 512; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[((A[i] >> 8) & 255) + 256]++] = A[i];
}
swap = A ; A = tmp ; tmp = swap ;
}
// 3rd LSB Byte Sort
if( Z[Jump3] != A.length )
{
Z[512] = A.length - Z[512];
for (i = 513; i < 768; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[((A[i] >> 16) & 255) + 512]++] = A[i];
}
swap = A ; A = tmp ; tmp = swap ;
}
// 4th LSB Byte Sort
if( Z[Jump4] != A.length )
{
Z[768] = A.length - Z[768];
for (i = 769; i < Z.length; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[(A[i] >> 24) + 768]++] = A[i];
}
return tmp ;
}
return A ;
}
The Java version ran faster with the != sign than with the == sign:
if( Z[Jump] != A.length )
{
// lines of code
}...
but in C the version below was, on average, 25% faster (with the == sign) than its counterpart with !=. Your hardware might react differently.
if( Z[Jump] == A.length );
else
{
// lines of code
}...
Below is the C code ("long" on my machine is 32 bits):
long* Radix_2_ac_long(long *A, size_t N, long *Temp)// Sorts in ascending order
{
size_t Z[1024] = {0};
long *swp;
size_t i, Jump, Jump2, Jump3, Jump4;
// Short-circuit set-up
Jump = *A & 255;
Z[Jump] = 1;
Jump2 = ((*A >> 8) & 255) + 256;
Z[Jump2] = 1;
Jump3 = ((*A >> 16) & 255) + 512;
Z[Jump3] = 1;
Jump4 = (*A >> 24) + 768;
Z[Jump4] = 1;
// Histograms creation
for(i = 1 ; i < N ; ++i)
{
++Z[*(A+i) & 255];
++Z[((*(A+i) >> 8) & 255) + 256];
++Z[((*(A+i) >> 16) & 255) + 512];
++Z[(*(A+i) >> 24) + 768];
}
// 1st LSB byte sort
if( Z[Jump] == N );
else
{
for( i = 1 ; i < 256 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[*(A+i) & 255] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 2nd LSB byte sort
if( Z[Jump2] == N );
else
{
for( i = 257 ; i < 512 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[((*(A+i) >> 8) & 255) + 256] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 3rd LSB byte sort
if( Z[Jump3] == N );
else
{
for( i = 513 ; i < 768 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[((*(A+i) >> 16) & 255) + 512] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 4th LSB byte sort
if( Z[Jump4] == N );
else
{
for( i = 769 ; i < 1024 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[(*(A+i) >> 24) + 768] + Temp) = *(A+i);
}
return Temp;
}
return A;
}
EDIT 5
The sort now handles negative numbers too. Only some minor/negligible tweaks to the code were needed. It runs a little slower as a result, but the effect is not significant. Coded in C, below ("long" on my system is 32 bits):
long* Radix_Sort(long *A, size_t N, long *Temp)
{
size_t Z[1024] = {0};
long *swp;
size_t Jump, Jump2, Jump3, Jump4;
long i;
// Short-circuit set-up
Jump = *A & 255;
Z[Jump] = 1;
Jump2 = ((*A >> 8) & 255) + 256;
Z[Jump2] = 1;
Jump3 = ((*A >> 16) & 255) + 512;
Z[Jump3] = 1;
Jump4 = ((*A >> 24) & 255) + 768;
Z[Jump4] = 1;
// Histograms creation
for(i = 1 ; i < N ; ++i)
{
++Z[*(A+i) & 255];
++Z[((*(A+i) >> 8) & 255) + 256];
++Z[((*(A+i) >> 16) & 255) + 512];
++Z[((*(A+i) >> 24) & 255) + 768];
}
// 1st LSB byte sort
if( Z[Jump] == N );
else
{
for( i = 1 ; i < 256 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[*(A+i) & 255] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 2nd LSB byte sort
if( Z[Jump2] == N );
else
{
for( i = 257 ; i < 512 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[((*(A+i) >> 8) & 255) + 256] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 3rd LSB byte sort
if( Z[Jump3] == N );
else
{
for( i = 513 ; i < 768 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[((*(A+i) >> 16) & 255) + 512] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 4th LSB byte sort and negative numbers sort
if( Z[Jump4] == N );
else
{
for( i = 897 ; i < 1024 ; ++i )// counts for negative values start at index 896 ( 768 + 128 ) and go up to 1023
{
Z[i] = Z[i-1] + Z[i];
}
Z[768] = Z[768] + Z[1023];
for( i = 769 ; i < 896 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[((*(A+i) >> 24) & 255) + 768] + Temp) = *(A+i);
}
return Temp;
}
return A;
}
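For reference, a minimal calling sketch (the example data and the variable name sorted are mine, not from the post). Because Radix_Sort may return either A or Temp depending on which passes were skipped, the caller should read the result through the returned pointer:
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    long A[] = { 170, -45, 75, -90, -802, 24, 2, 66 };
    size_t N = sizeof A / sizeof A[0];
    long *Temp = malloc(N * sizeof *Temp);
    long *sorted = Radix_Sort(A, N, Temp);   /* may point to A or to Temp */
    size_t i;

    for (i = 0; i < N; i++)
        printf("%ld ", sorted[i]);
    printf("\n");
    free(Temp);
    return 0;
}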
EDIT 6
Below is the pointer-optimized version (it accesses array locations via pointers), which takes on average approximately 20% less time to sort than the one above. It also uses four separate arrays for faster address calculation ("long" on my system is 32 bits).
long* Radix_Sort(long *A, size_t N, long *Temp)
{
long Z1[256] ;
long Z2[256] ;
long Z3[256] ;
long Z4[256] ;
long T = 0 ;
while(T != 256)
{
*(Z1+T) = 0 ;
*(Z2+T) = 0 ;
*(Z3+T) = 0 ;
*(Z4+T) = 0 ;
++T;
}
size_t Jump, Jump2, Jump3, Jump4;
// Short-circuit set-up
Jump = *A & 255 ;
Z1[Jump] = 1;
Jump2 = (*A >> 8) & 255 ;
Z2[Jump2] = 1;
Jump3 = (*A >> 16) & 255 ;
Z3[Jump3] = 1;
Jump4 = (*A >> 24) & 255 ;
Z4[Jump4] = 1;
// Histograms creation
long *swp = A + N;
long *i = A + 1;
for( ; i != swp ; ++i)
{
++Z1[*i & 255];
++Z2[(*i >> 8) & 255];
++Z3[(*i >> 16) & 255];
++Z4[(*i >> 24) & 255];
}
// 1st LSB byte sort
if( Z1[Jump] == N );
else
{
swp = Z1+256 ;
for( i = Z1+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A-1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z1[*i & 255] + Temp) = *i;
}
swp = A;
A = Temp;
Temp = swp;
}
// 2nd LSB byte sort
if( Z2[Jump2] == N );
else
{
swp = Z2+256 ;
for( i = Z2+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A-1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z2[(*i >> 8) & 255] + Temp) = *i;
}
swp = A;
A = Temp;
Temp = swp;
}
// 3rd LSB byte sort
if( Z3[Jump3] == N );
else
{
swp = Z3 + 256 ;
for( i = Z3+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A-1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z3[(*i >> 16) & 255] + Temp) = *i;
}
swp = A;
A = Temp;
Temp = swp;
}
// 4th LSB byte sort and negative numbers sort
if( Z4[Jump4] == N );
else
{
swp = Z4 + 256 ;
for( i = Z4+129 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
*Z4 = *Z4 + *(Z4+255) ;
swp = Z4 + 128 ;
for( i = Z4+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A - 1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z4[(*i >> 24) & 255] + Temp) = *i;
}
return Temp;
}
return A;
}
The edit 4 version is good enough if the original and temp arrays fit in cache. If the array size is much greater than the cache size, most of the overhead is due to the random-order writes to the arrays. A hybrid MSB/LSB radix sort can avoid this issue: for example, split the array into 256 bins according to the most significant byte, then do an LSB radix sort on each of the 256 bins. The idea here is that a pair of bins (original and temp) will fit within the cache, where random-order writes are not an issue (for most cache implementations).
For an 8 MB cache, the goal is for each bin to be < 4 MB in size, i.e. 1 million 32-bit integers if the integers distribute evenly into the bins. This strategy works for array sizes up to 256 million 32-bit integers. For larger arrays, the MSB phase could split the array into 1024 bins, for up to 1 billion 32-bit integers. On my system, sorting 16,777,216 (2^24) 32-bit integers with a classic 8,8,8,8 LSB radix sort took 0.45 seconds, while the hybrid 8 MSB : 8,8,8 LSB sort took 0.24 seconds.
// split array into 256 bins according to most significant byte
void RadixSort(uint32_t * a, size_t count)
{
size_t aIndex[260] = {0}; // count / array
uint32_t * b = new uint32_t [count]; // allocate temp array
size_t i;
for(i = 0; i < count; i++) // generate histogram
aIndex[1+((size_t)(a[i] >> 24))]++;
for(i = 2; i < 257; i++) // convert to indices
aIndex[i] += aIndex[i-1];
for(i = 0; i < count; i++) // sort by msb
b[aIndex[a[i]>>24]++] = a[i];
for(i = 256; i; i--) // restore aIndex
aIndex[i] = aIndex[i-1];
aIndex[0] = 0;
for(i = 0; i < 256; i++) // radix sort the 256 bins
RadixSort3(&b[aIndex[i]], &a[aIndex[i]], aIndex[i+1]-aIndex[i]);
delete[] b;
}
// sort a bin by 3 least significant bytes
void RadixSort3(uint32_t * a, uint32_t *b, size_t count)
{
size_t mIndex[3][256] = {0}; // count / matrix
size_t i,j,m,n;
uint32_t u;
if(count == 0)
return;
for(i = 0; i < count; i++){ // generate histograms
u = a[i];
for(j = 0; j < 3; j++){
mIndex[j][(size_t)(u & 0xff)]++;
u >>= 8;
}
}
for(j = 0; j < 3; j++){ // convert to indices
m = 0;
for(i = 0; i < 256; i++){
n = mIndex[j][i];
mIndex[j][i] = m;
m += n;
}
}
for(j = 0; j < 3; j++){ // radix sort
for(i = 0; i < count; i++){ // sort by current lsb
u = a[i];
m = (size_t)(u>>(j<<3))&0xff;
b[mIndex[j][m]++] = u;
}
std::swap(a, b); // swap ptrs
}
}
Example code for classic lsb radix sorts:
Example C++ lsb radix sort using 8,8,8,8 bit fields:
typedef unsigned int uint32_t;
void RadixSort(uint32_t * a, size_t count)
{
size_t mIndex[4][256] = {0}; // count / index matrix
uint32_t * b = new uint32_t [count]; // allocate temp array
size_t i,j,m,n;
uint32_t u;
for(i = 0; i < count; i++){ // generate histograms
u = a[i];
for(j = 0; j < 4; j++){
mIndex[j][(size_t)(u & 0xff)]++;
u >>= 8;
}
}
for(j = 0; j < 4; j++){ // convert to indices
m = 0;
for(i = 0; i < 256; i++){
n = mIndex[j][i];
mIndex[j][i] = m;
m += n;
}
}
for(j = 0; j < 4; j++){ // radix sort
for(i = 0; i < count; i++){ // sort by current lsb
u = a[i];
m = (size_t)(u>>(j<<3))&0xff;
b[mIndex[j][m]++] = u;
}
std::swap(a, b); // swap ptrs
}
delete[] b;
}
Example C++ code using 16,16 bit fields:
typedef unsigned int uint32_t;
uint32_t * RadixSort(uint32_t * a, size_t count)
{
size_t mIndex[2][65536] = {0}; // count / index matrix
uint32_t * b = new uint32_t [count]; // allocate temp array
size_t i,j,m,n;
uint32_t u;
for(i = 0; i < count; i++){ // generate histograms
u = a[i];
for(j = 0; j < 2; j++){
mIndex[j][(size_t)(u & 0xffff)]++;
u >>= 16;
}
}
for(j = 0; j < 2; j++){ // convert to indices
m = 0;
for(i = 0; i < 65536; i++){
n = mIndex[j][i];
mIndex[j][i] = m;
m += n;
}
}
for(j = 0; j < 2; j++){ // radix sort
for(i = 0; i < count; i++){ // sort by current lsb
u = a[i];
m = (size_t)(u>>(j<<4))&0xffff;
b[mIndex[j][m]++] = u;
}
std::swap(a, b); // swap ptrs
}
delete[] b;
return(a);
}
N & 15, N & 31, N & 63, and so on: which of these bitwise operations takes the least time?
They are the same. Do not take it badly, but optimizing for speed without knowing how long things take may end quite badly. And even when you know the timing, hardware is very complicated nowadays and quite unpredictable. You program in Java, which is another layer of an insanely complex system. The same code may be faster today and slower tomorrow. You say it is approximately 2.232891909840167 times faster; in reality, you have a measurement on one hardware and software configuration with one set of data, and you can only hope the measurement is representative enough. Unfortunately, that is not always the case.
I rewrote your function. It is shorter and simpler, yet does not seem to be slower. Compilers tend to like code that is not too clever, as there are many optimizations for simple cases. The correction for negative numbers is not particularly nice; you can delete it if you do not like it. It seems to work best for 8 bits and 11 bits, probably due to cache sizes; have a look at the comments of rcgldr.
EDIT
@ytoamn, you are right: if everything is in the first bucket, the loop should continue, not break. That was a bug. As for the other changes, I would rather avoid the contract you have now. I think there are three natural contracts for a sorting function: the first sorts the original array and returns null; the second sorts the original array and returns it; the third returns a new sorted array and keeps the original array intact. I like the first one, as its behaviour is unambiguous. The way you have it now, you should add a big warning to the documentation that the original array is changed and is returned from the function in some cases but not in others. The second thing I would avoid is the old C code style. You should define a loop variable in the loop if you need it only there; defining it globally injects a dependency that may lead to bugs, and it has no advantage here, as properly defined loop variables would share the space in the end anyway. The compiler is well aware of the scope; you should use the smallest scope you need.
EDIT2
Feel free to comment directly under my post :-) Local variables are just addresses on the stack. You allocate memory when constructing an object, which is not the case here. As for the array, think about this code:
public static void Tst(int[] A) {
int[] tmp = new int[A.length];
A[0] = 6;
A = tmp; // changes what parameter A contains
A[0] = 7;
}
public static void main(String[] args) {
int[] A = new int[1];
A[0] = 5;
Tst(A);
System.out.println(A[0]); //prints 6
}
It prints 6. The number 7 is written into the tmp array only; the array A in main is not affected.
protected static void ASC2(int A[], int bits) {
int[] origA = A;
int[] tmp = new int[A.length];
int[] Z = new int[1 << bits];
int mask = (1 << bits) - 1;
for (int shift = 0; shift < 32; shift += bits) {
if (shift > 0) {
Arrays.fill(Z, 0);
}
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> shift) & mask]++;
}
if (Z[0] == A.length) {
continue; // all in first bucket
}
Z[Z.length - 1] = A.length - Z[Z.length - 1];
for (int i = Z.length - 2; i >= 0; --i) {
Z[i] = Z[i + 1] - Z[i];
}
if (shift + bits > 31) { // negative numbers correction
int halfLength = Z.length / 2;
int positSum = Z[halfLength];
int negSum = A.length - positSum;
if (negSum > 0) {
for (int i = 0; i < halfLength; ++i) {
Z[i] += negSum;
}
for (int i = halfLength; i < Z.length; ++i) {
Z[i] -= positSum;
}
}
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> shift) & mask]++] = A[i];
}
int[] swap = A;
A = tmp;
tmp = swap;
}
if (A != origA) {
System.arraycopy(A, 0, origA, 0, A.length);
}
}
EDIT3
Loop unrolling is a valid technique, and improving the short-circuiting is really nice. But by using array lengths as constants you definitely start to be too clever. If you hard-coded the base size, why not hard-code it all, like this:
protected static int[] DSC2(int A[])// sorts in descending order
{
int tmp[] = new int[A.length];
int Z[] = new int[256];
int sample, swap[];
// 1st LSB byte extraction
sample = A[0] & 255;
for (int i = 0; i < A.length; ++i) {
Z[A[i] & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[A[i] & 255]++] = A[i];
}
swap = A;
A = tmp;
tmp = swap;
Arrays.fill(Z, 0);
} else {
Z[sample] = 0;
}
// 2nd LSB byte extraction
sample = (A[0] >> 8) & 255;
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> 8) & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> 8) & 255]++] = A[i];
}
swap = A;
A = tmp;
tmp = swap;
Arrays.fill(Z, 0);
} else {
Z[sample] = 0;
}
// 3rd LSB byte extraction
sample = (A[0] >> 16) & 255;
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> 16) & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> 16) & 255]++] = A[i];
}
swap = A;
A = tmp;
tmp = swap;
Arrays.fill(Z, 0);
} else {
Z[sample] = 0;
}
// 4th LSB byte extraction
sample = (A[0] >> 24) & 255;
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> 24) & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> 24) & 255]++] = A[i];
}
A = tmp;
}
return A;
}
I have to print the numbers with at most N bits in which the count of bits set to 1 equals the count of bits set to 0, ignoring leading zeros. I think this can only happen when the count of bits is even.
My code:
int power(int k) {
    return 1 << k;
}
void print_numbers(int n){
n -= (n % 2); // FOR EVEN COUNT OF BITS
int exp = 1; // EXPONENTS WILL BE ODD (2^1, 2^3, 2^5, ...)
while (exp < n) {
int start = power(exp);
int end = power(exp + 1);
int ones = (exp + 1) / 2; // ALLOWED COUNT OF 1
for (int i = start; i < end; i++) {
int bits_count = 0;
for (int j = 0; j <= exp; j++){ // CHECK COUNT OF 1
bits_count += ((i >> j) & 1);
}
if (bits_count == ones){
printf("%d\n", i);
}
}
exp += 2;
    }
}
For N = 12 this function prints 637 numbers. Is this solution correct, or am I wrong? Any idea for a more efficient or better solution?
I came up with this, which is a totally different approach (and could be improved), but it works:
#include <stdio.h>
void checker(int number)
{
int c;
int zeros = 0;
int ones = 0;
for (c = 31; c >= 0; c--)
{
if (number >> c & 1)
{
ones++;
}
else if(ones > 0)
{
zeros++;
}
}
if(zeros == ones)
{
printf("%i\n", number);
}
}
int main()
{
int c;
for (c = 4095; c >= 0; c--)
{
checker(c);
}
return 0;
}
This gets me 638 values (including 0).
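The same test can also be written as "twice the popcount equals the bit width with leading zeros ignored". Below is a sketch of that formulation (the helper names bit_width and popcount are mine); for N = 12 it produces the same 638 values, including 0:
#include <stdio.h>

/* Number of significant bits in v, ignoring leading zeros (0 for v == 0). */
static int bit_width(unsigned int v)
{
    int w = 0;
    while (v) { ++w; v >>= 1; }
    return w;
}

/* Number of bits set to 1 in v. */
static int popcount(unsigned int v)
{
    int c = 0;
    while (v) { v &= v - 1; ++c; }
    return c;
}

int main(void)
{
    int total = 0;
    for (unsigned int v = 0; v < 4096; ++v)      /* N = 12 bits */
        if (2 * popcount(v) == bit_width(v)) {
            printf("%u\n", v);
            ++total;
        }
    printf("count = %d\n", total);               /* 638, including 0 */
    return 0;
}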