I was trying to optimize radix sort, because I felt there was room for it: the traditional versions in books and on the web seem to be direct copies of one another, and they are also slow, since they use an arbitrary base such as 10 for the modulo operation. I have optimized the code as far as I could, but I may have missed some optimization techniques; in that case, please enlighten me.
Motivation for optimization:
http://codercorner.com/RadixSortRevisited.htm
http://stereopsis.com/radix.html
I was unable to implement all the optimizations in those articles, mostly because they were beyond my skills and understanding and I lacked sufficient time; if you can, feel free to implement them.
EDIT 4
This Java version of radix sort calculates all histograms in one read and does not need to refill array Z with zeros after every LSB pass. It also retains the usual ability to skip a pass and jump to the next LSB pass when all values share the same byte. As usual, it handles only 32-bit integers, but a 64-bit version can be derived from it.
protected static int[] DSC(int A[])// Sorts in descending order
{
int tmp[] = new int[A.length] ;
int Z[] = new int[1024] ;
int i, Jump, Jump2, Jump3, Jump4, swap[] ;
Jump = A[0] & 255 ;
Z[Jump] = 1 ;
Jump2 = ((A[0] >> 8) & 255) + 256 ;
Z[Jump2] = 1 ;
Jump3 = ((A[0] >> 16) & 255) + 512 ;
Z[Jump3] = 1 ;
Jump4 = (A[0] >> 24) + 768 ;
Z[Jump4] = 1 ;
// Histograms creation
for (i = 1 ; i < A.length; ++i)
{
++Z[A[i] & 255] ;
++Z[((A[i] >> 8) & 255) + 256] ;
++Z[((A[i] >> 16) & 255) + 512] ;
++Z[(A[i] >> 24) + 768] ;
}
// 1st LSB Byte Sort
if( Z[Jump] != A.length )
{
Z[0] = A.length - Z[0];
for (i = 1; i < 256; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[A[i] & 255]++] = A[i];
}
swap = A ; A = tmp ; tmp = swap ;
}
// 2nd LSB Byte Sort
if( Z[Jump2] != A.length )
{
Z[256] = A.length - Z[256];
for (i = 257; i < 512; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[((A[i] >> 8) & 255) + 256]++] = A[i];
}
swap = A ; A = tmp ; tmp = swap ;
}
// 3rd LSB Byte Sort
if( Z[Jump3] != A.length )
{
Z[512] = A.length - Z[512];
for (i = 513; i < 768; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[((A[i] >> 16) & 255) + 512]++] = A[i];
}
swap = A ; A = tmp ; tmp = swap ;
}
// 4th LSB Byte Sort
if( Z[Jump4] != A.length )
{
Z[768] = A.length - Z[768];
for (i = 769; i < Z.length; ++i)
{
Z[i] = Z[i - 1] - Z[i];
}
for (i = 0; i < A.length; ++i)
{
tmp[Z[(A[i] >> 24) + 768]++] = A[i];
}
return tmp ;
}
return A ;
}
The Java version ran faster with the != comparison than with ==:
if( Z[Jump] != A.length )
{
// lines of code
}...
but in C the version below, with the == comparison, was on average 25% faster than its counterpart with !=. Your hardware might react differently.
if( Z[Jump] == A.length );
else
{
// lines of code
}...
Below is the C code ("long" on my machine is 32 bits):
long* Radix_2_ac_long(long *A, size_t N, long *Temp)// Sorts in ascending order
{
size_t Z[1024] = {0};
long *swp;
size_t i, Jump, Jump2, Jump3, Jump4;
// Sort-circuit set-up
Jump = *A & 255;
Z[Jump] = 1;
Jump2 = ((*A >> 8) & 255) + 256;
Z[Jump2] = 1;
Jump3 = ((*A >> 16) & 255) + 512;
Z[Jump3] = 1;
Jump4 = (*A >> 24) + 768;
Z[Jump4] = 1;
// Histograms creation
for(i = 1 ; i < N ; ++i)
{
++Z[*(A+i) & 255];
++Z[((*(A+i) >> 8) & 255) + 256];
++Z[((*(A+i) >> 16) & 255) + 512];
++Z[(*(A+i) >> 24) + 768];
}
// 1st LSB byte sort
if( Z[Jump] == N );
else
{
for( i = 1 ; i < 256 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )// i is unsigned (size_t), so it wraps past 0 and the loop ends
{
*(--Z[*(A+i) & 255] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 2nd LSB byte sort
if( Z[Jump2] == N );
else
{
for( i = 257 ; i < 512 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[((*(A+i) >> 8) & 255) + 256] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 3rd LSB byte sort
if( Z[Jump3] == N );
else
{
for( i = 513 ; i < 768 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[((*(A+i) >> 16) & 255) + 512] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 4th LSB byte sort
if( Z[Jump4] == N );
else
{
for( i = 769 ; i < 1024 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i < N ; --i )
{
*(--Z[(*(A+i) >> 24) + 768] + Temp) = *(A+i);
}
return Temp;
}
return A;
}
EDIT 5
The sort now handles negative numbers too; only minor tweaks to the code were needed. It runs a little slower as a result, but the effect is not significant. Coded in C, below ("long" on my system is 32 bits):
long* Radix_Sort(long *A, size_t N, long *Temp)
{
size_t Z[1024] = {0};
long *swp;
size_t Jump, Jump2, Jump3, Jump4;
long i;
// Sort-circuit set-up
Jump = *A & 255;
Z[Jump] = 1;
Jump2 = ((*A >> 8) & 255) + 256;
Z[Jump2] = 1;
Jump3 = ((*A >> 16) & 255) + 512;
Z[Jump3] = 1;
Jump4 = ((*A >> 24) & 255) + 768;
Z[Jump4] = 1;
// Histograms creation
for(i = 1 ; i < N ; ++i)
{
++Z[*(A+i) & 255];
++Z[((*(A+i) >> 8) & 255) + 256];
++Z[((*(A+i) >> 16) & 255) + 512];
++Z[((*(A+i) >> 24) & 255) + 768];
}
// 1st LSB byte sort
if( Z[Jump] == N );
else
{
for( i = 1 ; i < 256 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[*(A+i) & 255] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 2nd LSB byte sort
if( Z[Jump2] == N );
else
{
for( i = 257 ; i < 512 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[((*(A+i) >> 8) & 255) + 256] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 3rd LSB byte sort
if( Z[Jump3] == N );
else
{
for( i = 513 ; i < 768 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[((*(A+i) >> 16) & 255) + 512] + Temp) = *(A+i);
}
swp = A;
A = Temp;
Temp = swp;
}
// 4th LSB byte sort and negative numbers sort
if( Z[Jump4] == N );
else
{
for( i = 897 ; i < 1024 ; ++i )// -ve values frequency starts after index 895, i.e at 896 ( 896 = 768 + 128 ), goes upto 1023
{
Z[i] = Z[i-1] + Z[i];
}
Z[768] = Z[768] + Z[1023];
for( i = 769 ; i < 896 ; ++i )
{
Z[i] = Z[i-1] + Z[i];
}
for( i = N-1 ; i >= 0 ; --i )
{
*(--Z[((*(A+i) >> 24) & 255) + 768] + Temp) = *(A+i);
}
return Temp;
}
return A;
}
EDIT 6
Below is the pointer-optimized version (it accesses array locations via pointers), which takes, on average, approximately 20% less time to sort than the one above. It also uses 4 separate count arrays for faster address calculation ("long" on my system is 32 bits):
long* Radix_Sort(long *A, size_t N, long *Temp)
{
long Z1[256] ;
long Z2[256] ;
long Z3[256] ;
long Z4[256] ;
long T = 0 ;
while(T != 256)
{
*(Z1+T) = 0 ;
*(Z2+T) = 0 ;
*(Z3+T) = 0 ;
*(Z4+T) = 0 ;
++T;
}
size_t Jump, Jump2, Jump3, Jump4;
// Sort-circuit set-up
Jump = *A & 255 ;
Z1[Jump] = 1;
Jump2 = (*A >> 8) & 255 ;
Z2[Jump2] = 1;
Jump3 = (*A >> 16) & 255 ;
Z3[Jump3] = 1;
Jump4 = (*A >> 24) & 255 ;
Z4[Jump4] = 1;
// Histograms creation
long *swp = A + N;
long *i = A + 1;
for( ; i != swp ; ++i)
{
++Z1[*i & 255];
++Z2[(*i >> 8) & 255];
++Z3[(*i >> 16) & 255];
++Z4[(*i >> 24) & 255];
}
// 1st LSB byte sort
if( Z1[Jump] == N );
else
{
swp = Z1+256 ;
for( i = Z1+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A-1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z1[*i & 255] + Temp) = *i;
}
swp = A;
A = Temp;
Temp = swp;
}
// 2nd LSB byte sort
if( Z2[Jump2] == N );
else
{
swp = Z2+256 ;
for( i = Z2+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A-1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z2[(*i >> 8) & 255] + Temp) = *i;
}
swp = A;
A = Temp;
Temp = swp;
}
// 3rd LSB byte sort
if( Z3[Jump3] == N );
else
{
swp = Z3 + 256 ;
for( i = Z3+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A-1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z3[(*i >> 16) & 255] + Temp) = *i;
}
swp = A;
A = Temp;
Temp = swp;
}
// 4th LSB byte sort and negative numbers sort
if( Z4[Jump4] == N );
else
{
swp = Z4 + 256 ;
for( i = Z4+129 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
*Z4 = *Z4 + *(Z4+255) ;
swp = Z4 + 128 ;
for( i = Z4+1 ; i != swp ; ++i )
{
*i = *(i-1) + *i;
}
swp = A - 1;
for( i = A+N-1 ; i != swp ; --i )
{
*(--Z4[(*i >> 24) & 255] + Temp) = *i;
}
return Temp;
}
return A;
}
The EDIT 4 version is good enough if the original and temp arrays fit in cache. If the array is much larger than the cache, most of the overhead comes from the random-order writes to the arrays. A hybrid MSB/LSB radix sort can avoid this issue: for example, split the array into 256 bins according to the most significant byte, then do an LSB radix sort on each of the 256 bins. The idea is that a pair of bins (original and temp) will fit within the cache, where random-order writes are not an issue (for most cache implementations).
For an 8 MB cache, the goal is for each bin to be < 4 MB in size, i.e. about 1 million 32-bit integers, if the integers distribute evenly into the bins. This strategy works for array sizes up to 256 million 32-bit integers. For larger arrays, the MSB phase could split the array into 1024 bins, handling up to 1 billion 32-bit integers. On my system, sorting 16,777,216 (2^24) 32-bit integers with a classic 8,8,8,8 LSB radix sort took 0.45 seconds, while the hybrid 8 MSB : 8,8,8 LSB sort took 0.24 seconds.
// split array into 256 bins according to most significant byte
void RadixSort(uint32_t * a, size_t count)
{
size_t aIndex[260] = {0}; // count / array
uint32_t * b = new uint32_t [count]; // allocate temp array
size_t i;
for(i = 0; i < count; i++) // generate histogram
aIndex[1+((size_t)(a[i] >> 24))]++;
for(i = 2; i < 257; i++) // convert to indices
aIndex[i] += aIndex[i-1];
for(i = 0; i < count; i++) // sort by msb
b[aIndex[a[i]>>24]++] = a[i];
for(i = 256; i; i--) // restore aIndex
aIndex[i] = aIndex[i-1];
aIndex[0] = 0;
for(i = 0; i < 256; i++) // radix sort the 256 bins
RadixSort3(&b[aIndex[i]], &a[aIndex[i]], aIndex[i+1]-aIndex[i]);
delete[] b;
}
// sort a bin by 3 least significant bytes
void RadixSort3(uint32_t * a, uint32_t *b, size_t count)
{
size_t mIndex[3][256] = {0}; // count / matrix
size_t i,j,m,n;
uint32_t u;
if(count == 0)
return;
for(i = 0; i < count; i++){ // generate histograms
u = a[i];
for(j = 0; j < 3; j++){
mIndex[j][(size_t)(u & 0xff)]++;
u >>= 8;
}
}
for(j = 0; j < 3; j++){ // convert to indices
m = 0;
for(i = 0; i < 256; i++){
n = mIndex[j][i];
mIndex[j][i] = m;
m += n;
}
}
for(j = 0; j < 3; j++){ // radix sort
for(i = 0; i < count; i++){ // sort by current lsb
u = a[i];
m = (size_t)(u>>(j<<3))&0xff;
b[mIndex[j][m]++] = u;
}
std::swap(a, b); // swap ptrs
}
}
Example code for classic lsb radix sorts:
Example C++ lsb radix sort using 8,8,8,8 bit fields:
typedef unsigned int uint32_t;
void RadixSort(uint32_t * a, size_t count)
{
size_t mIndex[4][256] = {0}; // count / index matrix
uint32_t * b = new uint32_t [count]; // allocate temp array
size_t i,j,m,n;
uint32_t u;
for(i = 0; i < count; i++){ // generate histograms
u = a[i];
for(j = 0; j < 4; j++){
mIndex[j][(size_t)(u & 0xff)]++;
u >>= 8;
}
}
for(j = 0; j < 4; j++){ // convert to indices
m = 0;
for(i = 0; i < 256; i++){
n = mIndex[j][i];
mIndex[j][i] = m;
m += n;
}
}
for(j = 0; j < 4; j++){ // radix sort
for(i = 0; i < count; i++){ // sort by current lsb
u = a[i];
m = (size_t)(u>>(j<<3))&0xff;
b[mIndex[j][m]++] = u;
}
std::swap(a, b); // swap ptrs
}
delete[] b;
}
Example C++ code using 16,16 bit fields:
typedef unsigned int uint32_t;
uint32_t * RadixSort(uint32_t * a, size_t count)
{
size_t mIndex[2][65536] = {0}; // count / index matrix
uint32_t * b = new uint32_t [count]; // allocate temp array
size_t i,j,m,n;
uint32_t u;
for(i = 0; i < count; i++){ // generate histograms
u = a[i];
for(j = 0; j < 2; j++){
mIndex[j][(size_t)(u & 0xffff)]++;
u >>= 16;
}
}
for(j = 0; j < 2; j++){ // convert to indices
m = 0;
for(i = 0; i < 65536; i++){
n = mIndex[j][i];
mIndex[j][i] = m;
m += n;
}
}
for(j = 0; j < 2; j++){ // radix sort
for(i = 0; i < count; i++){ // sort by current lsb
u = a[i];
m = (size_t)(u>>(j<<4))&0xffff;
b[mIndex[j][m]++] = u;
}
std::swap(a, b); // swap ptrs
}
delete[] b;
return(a);
}
N & 15, N & 31, N & 63, and so on: which of these bitwise operations takes the least time?
They are the same. Don't take it badly, but optimizing for speed without knowing how long operations take can end quite badly. And even when you know the timing, hardware is very complicated nowadays and quite unpredictable. You program in Java, which is another layer of an insanely complex system. The same code may be faster today and slower tomorrow. You say "approximately 2.232891909840167 times faster"; in reality, you have a measurement on one hardware and software configuration with one set of data, and you can only hope it is representative enough. Unfortunately, that is not always the case.
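To illustrate the point about the masks (in C for brevity): each one compiles to a single AND with a different constant, so on typical hardware they all cost the same:
unsigned and15(unsigned n) { return n & 15; } /* one AND instruction */
unsigned and31(unsigned n) { return n & 31; } /* same instruction, different constant */
unsigned and63(unsigned n) { return n & 63; } /* same cost again */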
I rewrote your function. It is shorter and simpler, yet does not seem to be slower. Compilers tend to like code that is not too clever, as there are many optimizations for simple cases. The correction for negative numbers is not particularly nice; you can delete it if you do not like it. It seems to work best with 8 bits and 11 bits, probably due to cache sizes; have a look at rcgldr's comments.
EDIT
@ytoamn you are right: if everything is in the first bucket, the loop should continue, not break. That was a bug. As for the other changes, I would rather avoid the contract you have now. I think there are three natural contracts for a sorting function (see the sketch below). The first sorts the original array and returns nothing. The second sorts the original array and returns it. The third returns a new sorted array and keeps the original intact. I like the first one, as its behaviour is unambiguous. The way you have it now, you should add a big warning to the documentation that the original array is changed and is returned from the function in some cases but not in others. The second thing I would avoid is the old C code style. You should define a loop variable in the loop if you need it only there. Defining it globally injects a dependency that may lead to bugs, and it has no advantage here, as properly scoped loop variables would share the space in the end anyway. The compiler is well aware of the scope; you should use the smallest scope you need.
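To make the three contracts concrete, here they are as C-style signatures (the names are made up, purely for illustration):
#include <stddef.h>

/* 1: sort A in place, return nothing - unambiguous, my preference */
void sort_in_place(long *A, size_t N);

/* 2: sort A in place and return the same array - convenient for chaining */
long *sort_and_return(long *A, size_t N);

/* 3: leave A intact, return a newly allocated sorted copy - caller must free it */
long *sorted_copy(const long *A, size_t N);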
EDIT2
Feel free to comment directly under my post :-) Local variables are just addresses on the stack. You allocate memory when constructing an object, which is not the case here. As for the array, think about this code:
public static void Tst(int[] A) {
int[] tmp = new int[A.length];
A[0] = 6;
A = tmp; // changes what parameter A contains
A[0] = 7;
}
public static void main(String[] args) {
int[] A = new int[1];
A[0] = 5;
Tst(A);
System.out.println(A[0]); //prints 6
}
It prints 6. The number 7 is written only into the tmp array; the array A in main is not affected.
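The same thing happens with pointer parameters in C; a sketch analogous to the Java example above:
#include <stdio.h>

void tst(int *A, int *tmp) {
    A[0] = 6;
    A = tmp;    /* changes only the local copy of the parameter */
    A[0] = 7;   /* writes into tmp, not into the caller's array */
}

int main(void) {
    int a[1] = { 5 }, t[1] = { 0 };
    tst(a, t);
    printf("%d\n", a[0]); /* prints 6 */
    return 0;
}
This is also why the C versions above return a pointer: after an odd number of buffer swaps the sorted data ends up in Temp, and the caller can only learn that through the return value.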
protected static void ASC2(int A[], int bits) {
int[] origA = A;
int[] tmp = new int[A.length];
int[] Z = new int[1 << bits];
int mask = (1 << bits) - 1;
for (int shift = 0; shift < 32; shift += bits) {
if (shift > 0) {
Arrays.fill(Z, 0);
}
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> shift) & mask]++;
}
if (Z[0] == A.length) {
continue; // all in first bucket
}
Z[Z.length - 1] = A.length - Z[Z.length - 1];
for (int i = Z.length - 2; i >= 0; --i) {
Z[i] = Z[i + 1] - Z[i];
}
if (shift + bits > 31) { // negative numbers correction
int halfLength = Z.length / 2;
int positSum = Z[halfLength];
int negSum = A.length - positSum;
if (negSum > 0) {
for (int i = 0; i < halfLength; ++i) {
Z[i] += negSum;
}
for (int i = halfLength; i < Z.length; ++i) {
Z[i] -= positSum;
}
}
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> shift) & mask]++] = A[i];
}
int[] swap = A;
A = tmp;
tmp = swap;
}
if (A != origA) {
System.arraycopy(A, 0, origA, 0, A.length);
}
}
EDIT3
Loop unrolling is a valid technique, and improving the short-circuiting is really nice. But in using array lengths as constants you definitely start to be too clever. If you hard-coded the base size, why not hard-code it all, like this:
protected static int[] DSC2(int A[])// sorts in descending order
{
int tmp[] = new int[A.length];
int Z[] = new int[256];
int sample, swap[];
// 1st LSB byte extraction
sample = A[0] & 255;
for (int i = 0; i < A.length; ++i) {
Z[A[i] & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[A[i] & 255]++] = A[i];
}
swap = A;
A = tmp;
tmp = swap;
Arrays.fill(Z, 0);
} else {
Z[sample] = 0;
}
// 2nd LSB byte extraction
sample = (A[0] >> 8) & 255;
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> 8) & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> 8) & 255]++] = A[i];
}
swap = A;
A = tmp;
tmp = swap;
Arrays.fill(Z, 0);
} else {
Z[sample] = 0;
}
// 3rd LSB byte extraction
sample = (A[0] >> 16) & 255;
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> 16) & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> 16) & 255]++] = A[i];
}
swap = A;
A = tmp;
tmp = swap;
Arrays.fill(Z, 0);
} else {
Z[sample] = 0;
}
// 4th LSB byte extraction
sample = (A[0] >> 24) & 255;
for (int i = 0; i < A.length; ++i) {
Z[(A[i] >> 24) & 255]++;
}
if (Z[sample] != A.length) {
Z[0] = A.length - Z[0];
for (int i = 1; i < Z.length; ++i) {
Z[i] = Z[i - 1] - Z[i];
}
for (int i = 0; i < A.length; ++i) {
tmp[Z[(A[i] >> 24) & 255]++] = A[i];
}
A = tmp;
}
return A;
}
Related
I was trying to solve the following problem: given an array of ints that may contain duplicates, count how many permutations of the array exist such that every adjacent pair of integers sums to a perfect square. I derived a DP solution, but it was wrong. I looked at the sample solution; it was very similar to mine, but with a slight difference. Can someone please look at it and tell me why mine won't work but the sample answer does?
My train of thought: g[i][j] == 1 tells me that i and j form a pair whose sum is a perfect square, 0 otherwise. dp[s][j] counts the ways to reach visited-state s (whose binary representation records which node indices have been visited) ending at node index j; this is the standard DP for a Hamiltonian path in a graph. The answer is then the sum of dp[s][j] over the fully visited state s and all ending nodes j from 0 to n - 1. To avoid counting duplicates, I sort the input array, and within the same layer of the search, if nums[i - 1] == nums[i] but index i - 1 has not been visited, it means we just backed out of an earlier DFS that visited the same number, so we skip it.
I will paste the code here. Below is my answer, which fails if the array contains duplicates:
int n = nums.length;
int[][] dp = new int[1 << n][n];
int[][] g = new int[n][n];
Arrays.sort(nums);
for(int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
if(i != j && (Math.ceil(Math.sqrt(nums[i] + nums[j])) ==
Math.sqrt(nums[i] + nums[j]))) {
g[i][j] = 1;
}
}
}
for(int s = 0; s < (1 << n); s++) {
for(int j = 0; j < n; j++) {
if(s == (1 << j)) {
dp[1 << j][j] = (j == 0 || nums[j - 1] != nums[j]) ? 1 : 0;
continue;
}
if((s & (1 << j)) == 0) continue;
for(int i = 0; i < n; i++) {
if(g[i][j] == 0) continue;
if((s & (1 << i)) == 0) continue;
if(i > 0 && nums[i - 1] == nums[i] && ((s & (1 << (i - 1))) == 0)) continue;
dp[s][j] += dp[s & ~(1 << j)][i];
}
}
}
int res = 0;
int finish = (1 << n) - 1;
for(int i = 0; i < n; i++) {
res += dp[finish][i];
}
return res;
Below is the sample answer:
int n = nums.length;
boolean[][] g = new boolean[n][n];
int[][] dp = new int[1 << n][n];
Arrays.sort(nums);
for(int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
if(Math.floor(Math.sqrt(nums[i] + nums[j])) * Math.floor(Math.sqrt(nums[i] + nums[j])) == nums[i] + nums[j]) {
g[i][j] = g[j][i] = true;
}
}
}
for(int i = 0; i < nums.length; i++) {
if(i == 0 || (i > 0 && nums[i - 1] != nums[i]))
dp[1 << i][i] = 1;
}
for(int s = 0; s < (1 << n); s++) {
for(int j = 0; j < n; j++) {
if((s & (1 << j)) == 0) continue;
for(int i = 0; i < n; i++) {
if(g[i][j]) {
if((s & (1 << i)) != 0) continue;
if(i > 0 && nums[i] == nums[i - 1] && ((s & (1 << (i - 1))) == 0)) continue;
dp[s | (1 << i)][i] += dp[s][j];
}
}
}
}
int ans = 0;
for(int l = 0; l < n; l++) {
ans += dp[(1 << n) - 1][l];
}
return ans;
I am trying to sort floats with radix sort. My current algorithm works with unsigned integers: for example, if I enter the values 12, 100, 1, my sorted values are 1, 12, and 100. However, when I use a function that converts the floats to ints, calls the radix sort, and converts back to floats, my values remain unsorted; they print in the order they were entered.
I am unsure how to modify my current function so it can sort floats with radix sort.
void rs(unsigned int *a, int c) {
int i;
int m = a[0];
int bt = 0;
unsigned int *b = malloc(0 * sizeof(int));
for (i = 0; i < c; i++) {
if (a[i] > m)
m = a[i];
}
while((m>>bt) > 0){
int buck[2] = { 0 };
for (i = 0; i < c; i++) {
buck[(a[i]>>bt)&1]++;
}
for (i = 1; i < 2; i++) {
buck[i] += buck[i-1];
}
for (i = c-1; i >= 0; i--) {
b[--buck[(a[i]>>bt)&1]] = a[i];
}
for (i = 0; i < c; i++) {
a[i] = b[i];
}
bt++;
}
free(b);
}
The function I am using to transform floats to ints and back to floats is from Radix Sort for Floats:
void rfloat(float* arr, size_t size) {
assert(sizeof(unsigned) == sizeof(float) && sizeof(float) == 4);
unsigned* d = malloc(size * sizeof(unsigned));
for (size_t i = 0; i < size; i++) {
// Interpret float as 32-bit unsigned.
d[i] = *(unsigned*) &(arr[i]);
// Flip all except top if top bit is set.
d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
// Flip top bit.
d[i] ^= (1u << 31);
}
rs(d, size);
// Inverse transform.
for (size_t i = 0; i < size; i++) {
d[i] ^= (1u << 31);
d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
arr[i] = *(float*) &(d[i]);
}
free(d);
}
There are multiple issues:
You use int all over the place where you should be using unsigned (for values) or size_t (for sizes/indices).
You allocate 0 bytes.
(m >> bt) > 0 doesn't work as a stop condition; shifting by a count equal to or greater than the bit width is undefined.
After changing the types to unsigned, the loop boundaries don't work anymore (for example, i >= 0 is always true when i is unsigned).
I took the liberty of fixing the above and choosing some better variable names:
#include <limits.h>
void rs(unsigned int *a, size_t c) {
size_t i;
unsigned bit = 0;
unsigned *b = malloc(c * sizeof(unsigned));
unsigned m = a[0]; // Max element.
for (i = 0; i < c; i++) {
if (a[i] > m) m = a[i];
}
while (bit < CHAR_BIT*sizeof(m) && (m >> bit)) {
size_t bucket_len[2] = { 0, 0 };
for (i = 0; i < c; i++) bucket_len[(a[i] >> bit) & 1]++;
size_t bucket_end[2] = {bucket_len[0], bucket_len[0] + bucket_len[1]};
for (i = c; i-- > 0; ) {
size_t j = --bucket_end[(a[i] >> bit) & 1];
b[j] = a[i];
}
for (i = 0; i < c; i++) a[i] = b[i];
bit++;
}
free(b);
}
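A minimal usage sketch with the values from the question (12, 100, 1), assuming the corrected rs above is the one called by rfloat:
#include <stdio.h>
#include <stddef.h>

void rfloat(float *arr, size_t size); /* defined in the question above */

int main(void) {
    float arr[] = { 12.0f, 100.0f, 1.0f };
    rfloat(arr, 3);              /* map to ordered unsigneds, radix sort, map back */
    for (int i = 0; i < 3; i++)
        printf("%g\n", arr[i]);  /* expected: 1 12 100 */
    return 0;
}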
Is it possible to generalize this Radix Sort code to look at only the 40 most significant bits of uint64_t data?
Generalizing the 32-bit sort code from user rcgldr to 48 and 64 bits, and comparing sorts of a uint64_t[36M] on 64, 48, and 32 bits:
Time= 3.130 sec = 20.342%, RADIX_SORT_UINT64_REG, hits=4, 0.782 sec each
Time= 2.336 sec = 15.180%, RADIX_SORT_UINT64_48R, hits=4, 0.584 sec each
Time= 1.540 sec = 10.007%, RADIX_SORT_UINT64_32R, hits=4, 0.385 sec each
This confirms the expected linearity between bits sorted and time to sort (0.782 : 0.584 : 0.385 is roughly 2 : 1.5 : 1, matching 64 : 48 : 32).
I need to sort hundreds of uint64_t[]s on only the 34 most significant bits (which, in whole bytes, means a 40-bit sort). The 48 MSB sort works, but sorting on only 40 bits should take ~5/6 as long. This could reduce a 58-second travail to a mere 48-second ordeal for the user.
The difference between the 32 MSB code and the 48 MSB code is mostly slight variations except for one code segment:
32 bit code segment radix sorting mIndex [0, 1, 2, 3]:
for (i = 0; i < count; i++) { /* radix sort */
u = pData[i];
pTemp[mIndex[3][(u >> 32) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pTemp[i];
pData[mIndex[2][(u >> 40) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pData[i];
pTemp[mIndex[1][(u >> 48) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pTemp[i];
pData[mIndex[0][(u >> 56) & 0xff]++] = u;
}
The 48 bit segment prepends this code to handle mIndex [4, 5]:
for (i = 0; i < count; i++) { /* radix sort */
u = pData[i];
pTemp[mIndex[5][(u >> 16) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pTemp[i];
pData[mIndex[4][(u >> 24) & 0xff]++] = u;
}
Converting to full 64-bit sorting adds similar code to operate on matrix indexes [6, 7].
Is it even possible to add mIndex[4] to create a 40 MSB sort?
The pData array is used with even mIndex indexes.
The pTemp array is used with odd mIndex indexes.
Is this method limited to being generalized only for even byte counts?
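For what it's worth, the method does not appear limited to even byte counts; with an odd number of passes, the sorted result simply ends up in pTemp, so either the caller uses pTemp or the function copies it back. Below is a sketch of a 40 MSB version under that assumption (the tsa timing plumbing is omitted, and the ping-pong loop is just a compact equivalent of the unrolled passes above):
#include <stdint.h>
#include <string.h>

void radix_sort_r64_40(uint64_t *pData, uint64_t *pTemp, size_t count)
{
    size_t mIndex[5][256] = { 0 };      /* histograms for shifts 24..56 */
    size_t i, j, m, n;
    uint64_t u;
    for (i = 0; i < count; i++) {       /* generate histograms */
        u = pData[i];
        mIndex[4][(u >> 24) & 0xff]++;  /* extra row for the 5th byte */
        mIndex[3][(u >> 32) & 0xff]++;
        mIndex[2][(u >> 40) & 0xff]++;
        mIndex[1][(u >> 48) & 0xff]++;
        mIndex[0][(u >> 56) & 0xff]++;
    }
    for (j = 0; j < 5; j++) {           /* convert to indices */
        n = 0;
        for (i = 0; i < 256; i++) {
            m = mIndex[j][i];
            mIndex[j][i] = n;
            n += m;
        }
    }
    uint64_t *src = pData, *dst = pTemp, *swp;
    for (j = 5; j-- > 0; ) {            /* passes at shifts 24, 32, 40, 48, 56 */
        unsigned shift = (unsigned)(56 - 8 * j);
        for (i = 0; i < count; i++) {
            u = src[i];
            dst[mIndex[j][(u >> shift) & 0xff]++] = u;
        }
        swp = src; src = dst; dst = swp; /* ping-pong the buffers */
    }
    /* 5 passes is odd, so the result is in pTemp; copy it back */
    memcpy(pData, pTemp, count * sizeof(uint64_t));
}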
===================================
Full code for sorting a uint64_t[] on the 32 most significant bits:
// From code submitted by rcgldr on stackoverflow.com, Nov 3 2017
void radix_sort_r64_32(uint64_t *pData, uint64_t *pTemp, size_t count,
EV_TIME_STR *tsa)
{
size_t mIndex[4][256] = { 0 }; /* index matrix */
size_t * pmIndex; /* ptr to row of matrix */
size_t i, j, m, n;
uint64_t u;
if(tsa) time_event(E_RADIX_SORT_UINT64_32R, tsa, E_TIME_EVENT, 1, 0);
for (i = 0; i < count; i++) { /* generate histograms */
u = pData[i];
mIndex[3][(u >> 32) & 0xff]++;
mIndex[2][(u >> 40) & 0xff]++;
mIndex[1][(u >> 48) & 0xff]++;
mIndex[0][(u >> 56) & 0xff]++;
}
for (j = 0; j < 4; j++) { /* convert to indices */
pmIndex = mIndex[j];
n = 0;
for (i = 0; i < 256; i++) {
m = pmIndex[i];
pmIndex[i] = n;
n += m;
}
}
for (i = 0; i < count; i++) { /* radix sort */
u = pData[i];
pTemp[mIndex[3][(u >> 32) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pTemp[i];
pData[mIndex[2][(u >> 40) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pData[i];
pTemp[mIndex[1][(u >> 48) & 0xff]++] = u;
}
for (i = 0; i < count; i++) {
u = pTemp[i];
pData[mIndex[0][(u >> 56) & 0xff]++] = u;
}
} // End Radix_Sort_R64_32().
======================
And the diff between the 32 bit and the 48 bit sort versions:
diff ~/tmp/radix.sort.32.c ~/tmp/radix.sort.48.c
< void radix_sort_r64_32(uint64_t *pData, uint64_t *pTemp, size_t count,
---
> void radix_sort_r64_48(uint64_t *pData, uint64_t *pTemp, size_t count,

< size_t mIndex[4][256] = { 0 }; /* index matrix */
---
> size_t mIndex[6][256] = { 0 }; /* index matrix */

< if(tsa) time_event(E_RADIX_SORT_UINT64_32R, tsa, E_TIME_EVENT, 1, 0);
---
> if(tsa) time_event(E_RADIX_SORT_UINT64_48R, tsa, E_TIME_EVENT, 1, 0);

---
> mIndex[5][(u >> 16) & 0xff]++; // B2
> mIndex[4][(u >> 24) & 0xff]++; // B3

< for (j = 0; j < 4; j++) { /* convert to indices */
---
> for (j = 0; j < 6; j++) { /* convert to indices */

> pTemp[mIndex[5][(u >> 16) & 0xff]++] = u;
> }
> for (i = 0; i < count; i++) { /* radix sort */
> u = pTemp[i];
> pData[mIndex[4][(u >> 24) & 0xff]++] = u;
> }
> for (i = 0; i < count; i++) { /* radix sort */
> u = pData[i];
44c56
< } // End Radix_Sort_R64_32().
---
> } // End Radix_Sort_R64_48().
Executive summary of unique differences:
Unique lines from "~/tmp/radix.sort.32.c":
02) void radix_sort_r64_32(uint64_t *pData, uint64_t *pTemp, size_t count,
05) size_t mIndex[4][256] = { 0 }; /* index matrix */
19) for (j = 0; j < 4; j++) { /* convert to indices */
Unique lines from "~/tmp/radix.sort.48.c":
01) void radix_sort_r64_48(uint64_t *pData, uint64_t *pTemp, size_t count,
04) size_t mIndex[6][256] = { 0 }; /* index matrix */
14) mIndex[5][(u >> 16) & 0xff]++; // B2
15) mIndex[4][(u >> 24) & 0xff]++; // B3
22) for (j = 0; j < 6; j++) { /* convert to indices */
34) pTemp[mIndex[5][(u >> 16) & 0xff]++] = u;
38) pData[mIndex[4][(u >> 24) & 0xff]++] = u;
After debugging and trying different image software (xdg, gimp), I still have a bug that throws me off completely.
The problem concerns convolution of PPM images whose aspect ratio is not square. I'm using a 1500x1000 px image, where the mask {0,0,0, 0,1,0, 0,0,0} works just fine (it simply copies the image); however, for a mask whose first or last row is nonzero, e.g. {0,1,0, 0,0,0, 0,0,0}, the image is shifted rightwards by 1/3 of its size. I find it peculiar, because as far as I know there is no overflow or pointer arithmetic that might cause this problem.
I've narrowed it down to the convolution kernel. As far as I know, I have no problems saving or reading the image; after running i_convolution, the image is just shifted by a fixed amount.
void i_convolution(unsigned int **in, unsigned int ***out,
int y_max, int x_max, int kernel_size)
{
int kernel_sum = 0;
for(int i = 0; i < kernel_size; i++)
{
for(int j = 0; j < kernel_size; j++)
{
kernel_sum += kernel[i * kernel_size + j];
}
}
printf("kernel sum = %d\n", kernel_sum);
for (int i = 1; i < y_max - 1; i++)
{
for (int j = 1; j < x_max - 1; j++)
{
int r = 0;
int g = 0;
int b = 0;
for (int y_conv = -1; y_conv <= 1; y_conv++)
{
for (int x_conv = -1; x_conv <= 1; x_conv++)
{
int y_index = i + y_conv;
int x_index = j + x_conv;
unsigned char rval = (unsigned char)(in[y_index][x_index] & 0xff);
unsigned char gval = (unsigned char)((in[y_index][x_index] & 0xff00) >> 8);
unsigned char bval = (unsigned char)((in[y_index][x_index] & 0xff0000) >> 16);
int kernel_val = kernel[(y_conv + 1)*kernel_size + (x_conv + 1)];
r += (int)(rval * kernel_val);
g += (int)(gval * kernel_val);
b += (int)(bval * kernel_val);
}
}
r /= kernel_sum; // normalize by the kernel sum
g /= kernel_sum; // normalize by the kernel sum
b /= kernel_sum; // normalize by the kernel sum
// b = abs(b);
if (r > 255) r = 255;
else if(r < 0) r = 0;
if (g > 255) g = 255;
else if(g < 0) g = 0;
if (b > 255) b = 255;
else if(b < 0) b = 0;
unsigned int val;
val = 0;
val |= b & 0xff;
val <<= 8;
val |= g & 0xff;
val <<= 8;
val |= r & 0xff;
(*out)[i][j] = val;
}
}
}
Let's take the kernel {0,1,0, 0,0,0, 0,0,0}. The results look like this, with the original on the left and the result after convolution on the right:
https://i.imgur.com/rzXKjUY.png
I will be thankful for any help.
Best regards.
I am marking it as solved: the problem was that I misinterpreted the PPM format's height and width, which caused this behaviour. Swapping y with x (and allocating memory accordingly) solves it!
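For anyone hitting the same thing: in the PPM header, the width comes before the height, which is the easy part to get backwards. A minimal header-reading sketch for P6 files (it omits the '#' comment handling a full parser would need):
#include <stdio.h>

/* Reads a P6 PPM header. Note the order: width first, then height. */
int read_ppm_header(FILE *f, int *width, int *height, int *maxval)
{
    char magic[3] = { 0 };
    if (fscanf(f, "%2s", magic) != 1 || magic[0] != 'P' || magic[1] != '6')
        return -1;
    if (fscanf(f, "%d %d %d", width, height, maxval) != 3)
        return -1;
    fgetc(f); /* consume the single whitespace byte before the binary pixel data */
    return 0;
}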
I just started using a microcontroller and I have to implement encryption/decryption in it. Sorry for the super long post.
This is the Python script; it does not need to be edited.
DEVPATH = "/dev"
TTYPREFIX = "ttyACM"
INPUT = b"Hello!"
#OUTPUT = b"Ifmmp!"
if __name__=='__main__':
for tty in (os.path.join(DEVPATH,tty) for tty in os.listdir(DEVPATH) \
if tty.startswith(TTYPREFIX)):
try:
ctt = serial.Serial(tty, timeout=1, writeTimeout=1)
except serial.SerialException:
continue
ctt.flushInput()
ctt.flushOutput()
# print(ctt)
try:
ctt.write(INPUT)
except serial.SerialTimeoutException:
ctt.__exit__()
continue
for retry in range(3): # Try three times to read connection test result
ret = ctt.read(2*len(INPUT))
print("ret: " + repr(ret))
if INPUT in ret:
sys.exit(0)
break
else:
ctt.__exit__()
continue
break
else:
print("Failed")
sys.exit(1)
This is the main.c file. I know that CDC_Device_BytesReceived will receive the input from the Python script, and if there is input, the while loop will run, since Bytes will be greater than 0.
while (1)
{
/* Check if data received */
Bytes = CDC_Device_BytesReceived(&VirtualSerial_CDC_Interface);
while(Bytes > 0)
{
/* Send data back to the host */
ch = CDC_Device_ReceiveByte(&VirtualSerial_CDC_Interface);
CDC_Device_SendByte(&VirtualSerial_CDC_Interface, ch);
--Bytes;
}
CDC_Device_USBTask(&VirtualSerial_CDC_Interface);
}
return 0;
}
However, in the loop, I was tasked to add a switch-case so that it switches between encryption and decryption, but I have no idea what kind of condition to use to differentiate between them.
This is the code for encryption.
int crypto_aead_encrypt(unsigned char* c, unsigned long long* clen,
const unsigned char* m, unsigned long long mlen,
const unsigned char* ad, unsigned long long adlen,
const unsigned char* nsec, const unsigned char* npub,
const unsigned char* k)
{
int klen = CRYPTO_KEYBYTES; // 16 bytes
int size = 320 / 8; // 40 bytes
int rate = 128 / 8; // 16 bytes
// int capacity = size - rate;
// Permutation
int a = 12;
int b = 8;
// Padding process appends a 1 to the associated data
i64 s = adlen / rate + 1;
// Padding process appends a 1 to the plain text
i64 t = mlen / rate + 1;
// Length of the final plaintext block = mlen mod rate
i64 l = mlen % rate;
u8 S[size];
// Resulting Padded associated data is split into s blocks of r bits
u8 A[s * rate];
// Resulting Padded plain text is split into t blocks of r bits
u8 P[t * rate];
i64 i, j;
// Pad Associated Data
for(i = 0; i < adlen; ++i)
{
A[i] = ad[i];
}
A[adlen] = 0x80; // append the single 1 bit (0x80 byte)
// Zero-fill the remainder of the last block
for(i = adlen + 1; i < s * rate; ++i)
{
A[i] = 0;
}
// Pad Plaintext
for(i = 0; i < mlen; ++i)
{
P[i] = m[i];
}
P[mlen] = 0x80; // append the single 1 bit (0x80 byte)
// Zero-fill the remainder of the last block
for(i = mlen + 1; i < t * rate; ++i)
{
P[i] = 0;
}
// Initialization
// IV = k || r || a || b || 0
// S = IV || K || N
S[0] = klen * 8;
S[1] = rate * 8;
S[2] = a;
S[3] = b;
// i < 40 - 2 * 16 = 8
for(i = 4; i < size - 2 * klen; ++i)
{
// S[4] until S[7] = 0
S[i] = 0;
}
// i < 16
for(i = 0; i < klen; ++i)
{
// S[8] = k[0], S[9] = k[1] until S[23] = k[15]
S[size - 2 * klen + i] = k[i];
}
// i < 16
for(i = 0; i < klen; i++)
{
// S[24] = npub[0], S[25] = npub[1] until S[39] = npub[15]
S[size - klen + i] = npub[i];
}
printstate("Initial Value: ", S);
// S - state, 12-a - start, a - 12 rounds
permutations(S, 12 - a, a);
// i < 16
for(i = 0; i < klen; ++i)
{
// S[24] ^= k[0], S[25] ^= k[1] until S[39] ^= k[15]
S[size - klen + i] ^= k[i];
}
printstate("Initialization: ", S);
// Process Associated Data
if(adlen != 0)
{
// i < s = (adlen / rate + 1)
for(i = 0; i < s; ++i)
{
// rate = 16
for(j = 0; j < rate; ++j)
{
// S ^= A
S[j] ^= A[i * rate + j];
}
// S - state, 12-b - start, b - 8 rounds
permutations(S, 12 - b, b);
}
}
// S <- S ^= 1
S[size - 1] ^= 1;
printstate("Process Associated Data: ", S);
// Process Plain Text
for(i = 0; i < t - 1; ++i)
{
for(j = 0; j < rate; ++j)
{
// S <- S ^= P
S[j] ^= P[i * rate + j];
// c <- S
c[i * rate + j] = S[j];
}
// S <- permutation b (S)
permutations(S, 12 - b, b);
}
for(j = 0; j < rate; ++j)
{
// S <- S ^= Pt
S[j] ^= P[(t-1) * rate + j];
}
for(j = 0; j < l; ++j)
{
// C <- S
// Bitstring S truncated to the first (most significant) k bits
c[(t - 1) * rate + j] = S[j];
}
printstate("Process Plaintext: ", S);
// Finalization
for(i = 0; i < klen; ++i)
{
S[rate + i] ^= k[i];
}
permutations(S, 12 - a, a);
for(i = 0; i < klen; ++i)
{
// T <- S ^= k
// Bitstring S truncated to the last (least significant) k bits
S[size - klen + i] ^= k[i];
}
printstate("Finalization: ", S);
// Return Cipher Text & Tag
for(i = 0; i < klen; ++i)
{
c[mlen + i] = S[size - klen + i];
}
*clen = mlen + klen;
return 0;
}
And the code for decryption:
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
unsigned char *nsec, const unsigned char *c,
unsigned long long clen, const unsigned char *ad,
unsigned long long adlen, const unsigned char *npub,
const unsigned char *k)
{
*mlen = 0;
if (clen < CRYPTO_KEYBYTES)
return -1;
int klen = CRYPTO_KEYBYTES;
// int nlen = CRYPTO_NPUBBYTES;
int size = 320 / 8;
int rate = 128 / 8;
// int capacity = size - rate;
int a = 12;
int b = 8;
i64 s = adlen / rate + 1;
i64 t = (clen - klen) / rate + 1;
i64 l = (clen - klen) % rate;
u8 S[size];
u8 A[s * rate];
u8 M[t * rate];
i64 i, j;
// pad associated data
for (i = 0; i < adlen; ++i)
{
A[i] = ad[i];
}
A[adlen] = 0x80;
for (i = adlen + 1; i < s * rate; ++i)
{
A[i] = 0;
}
// initialization
S[0] = klen * 8;
S[1] = rate * 8;
S[2] = a;
S[3] = b;
for (i = 4; i < size - 2 * klen; ++i)
{
S[i] = 0;
}
for (i = 0; i < klen; ++i)
{
S[size - 2 * klen + i] = k[i];
}
for (i = 0; i < klen; ++i)
{
S[size - klen + i] = npub[i];
}
printstate("initial value:", S);
permutations(S, 12 - a, a);
for (i = 0; i < klen; ++i)
{
S[size - klen + i] ^= k[i];
}
printstate("initialization:", S);
// process associated data
if (adlen)
{
for (i = 0; i < s; ++i)
{
for (j = 0; j < rate; ++j)
{
S[j] ^= A[i * rate + j];
}
permutations(S, 12 - b, b);
}
}
S[size - 1] ^= 1;
printstate("process associated data:", S);
// process plaintext
for (i = 0; i < t - 1; ++i)
{
for (j = 0; j < rate; ++j)
{
M[i * rate + j] = S[j] ^ c[i * rate + j];
S[j] = c[i * rate + j];
}
permutations(S, 12 - b, b);
}
for (j = 0; j < l; ++j)
{
M[(t - 1) * rate + j] = S[j] ^ c[(t - 1) * rate + j];
}
for (j = 0; j < l; ++j)
{
S[j] = c[(t - 1) * rate + j];
}
S[l] ^= 0x80;
printstate("process plaintext:", S);
// finalization
for (i = 0; i < klen; ++i)
{
S[rate + i] ^= k[i];
}
permutations(S, 12 - a, a);
for (i = 0; i < klen; ++i)
{
S[size - klen + i] ^= k[i];
}
printstate("finalization:", S);
// return -1 if verification fails
for (i = 0; i < klen; ++i)
{
if (c[clen - klen + i] != S[size - klen + i])
{
return -1;
}
}
// return plaintext
*mlen = clen - klen;
for (i = 0; i < *mlen; ++i)
{
m[i] = M[i];
}
return 0;
}
Thanks in advance for the help; I am really clueless right now.
However, in the loop, I was tasked to add a switch-case so that it switches between encryption and decryption, but I have no idea what kind of condition to use to differentiate between them.
According to your comments, the calls for encryption and decryption happen inside CDC_Device_ReceiveByte and CDC_Device_SendByte, which means you need to create a state machine for sending and receiving the bytes. The condition you would use for this is the return value of CDC_Device_BytesReceived.
You can create an enum for the states, and a simple struct for holding the current state along with any other pertinent information. You can create a function for the state machine that maps out what to do given the current state. Your while(1) loop will simply call the function to ensure the state machine moves along. You might implement that like this:
typedef enum{
IDLE,
DECRYPTING,
ENCRYPTING,
}state_t;
typedef struct{
state_t current_state;
}fsm_t;
fsm_t my_fsm = {0}; //initial state is idle
void myFSM(void){
switch(my_fsm.current_state){
case IDLE:
{
/* Check if data received */
Bytes = CDC_Device_BytesReceived(&VirtualSerial_CDC_Interface);
if(Bytes) my_fsm.current_state = DECRYPTING; //we have data, decrypt it
break;
}
case DECRYPTING:
{
/* Send data back to the host */
ch = CDC_Device_ReceiveByte(&VirtualSerial_CDC_Interface);
my_fsm.current_state = ENCRYPTING; // encrypt byte that we are going to send to host
break;
}
case ENCRYPTING:
{
CDC_Device_SendByte(&VirtualSerial_CDC_Interface, ch);
--Bytes;
if(Bytes){
my_fsm.current_state = DECRYPTING; // still have bytes left to decrypt
}
else my_fsm.current_state = IDLE;
break;
}
default:
{
asm("nop"); // whoops
break;
}
}
}
Now your loop is just
while(1){
myFSM();
}