// NOTE(review): the original include directives were mangled (header names
// stripped); restored from the identifiers used below
// (fprintf -> <cstdio>, std::cout -> <iostream>, omp_get_num_threads -> <omp.h>).
#include <cstdio>
#include <iostream>
#include <omp.h>

#ifdef NO_CHECK_ERRORS
#define cucheck(call) call
#else
/** A macro for checking CUDA runtime calls: on failure, prints file, line,
 *  error string and the failing expression to stderr.  Wrapped in
 *  do { ... } while (0) so it expands to a single statement and is safe
 *  inside un-braced if/else.  It deliberately does not abort, matching the
 *  original best-effort behavior. */
#define cucheck(call)                                                        \
  do {                                                                       \
    cudaError_t cucheck_err = (call);                                        \
    if (cucheck_err != cudaSuccess) {                                        \
      const char* err_str = cudaGetErrorString(cucheck_err);                 \
      fprintf(stderr, "%s (%d): %s in %s\n", __FILE__, __LINE__, err_str,    \
              #call);                                                        \
    }                                                                        \
  } while (0)
#endif

/* Upper bound on the number of CUDA streams created by main(). */
#define MAX_NSTREAMS 32

/** Trivial kernel: increments counter[s].  Always launched <<<1,1>>> below,
 *  so no grid bounds check or atomics are needed; `s` must be a valid index
 *  into `counter`. */
__global__ void kernel(int* counter, const int s) { ++counter[s]; }

/**
 * Runs one event-processing pipeline on stream i: a fixed sequence of
 * memset / kernel / device-to-host copy phases, synchronizing the stream at
 * the phase boundaries where the host would consume the counter.
 *
 * @param i          stream (and counter slot) index
 * @param d_counter  device array with one int per stream
 * @param h_counter  host array with one int per stream
 *                   (NOTE(review): should be pinned via cudaMallocHost for
 *                   cudaMemcpyAsync to be truly asynchronous — confirm at
 *                   the allocation site)
 * @param streams    array of at least i+1 created CUDA streams
 */
void process_bunch(int i, int* d_counter, int* h_counter,
                   cudaStream_t* streams) {
  // find_tube_hits_hstream
  cucheck(cudaMemsetAsync(d_counter + i, 0, sizeof(int), streams[i]));
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  cucheck(cudaMemsetAsync(d_counter + i, 0, sizeof(int), streams[i]));
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));
  cucheck(cudaStreamSynchronize(streams[i]));

  // find_half_skewlets_hstream
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));

  // find_triplets_hstream
  cucheck(cudaMemsetAsync(d_counter + i, 0, sizeof(int), streams[i]));
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));
  cucheck(cudaStreamSynchronize(streams[i]));

  // find_skewlets_pre_poca_hstream
  for (int k = 0; k < 3; ++k) {
    kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
    cucheck(cudaGetLastError());
  }
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));
  cucheck(cudaStreamSynchronize(streams[i]));

  // find_skewlets_poca_hstream
  for (int k = 0; k < 3; ++k) {
    kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
    cucheck(cudaGetLastError());
  }
  // NOTE(review): no stream sync here in the original — h_counter[i] must
  // not be read by the host until the sync at the end of the next phase.
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));

  // find_cand_tracks_hstream
  for (int k = 0; k < 3; ++k) {
    kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
    cucheck(cudaGetLastError());
  }
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));
  cucheck(cudaStreamSynchronize(streams[i]));

  // eval_cand_tracks_hstream
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  cucheck(cudaMemsetAsync(d_counter + i, 0, sizeof(int), streams[i]));
  kernel<<<1, 1, 0, streams[i]>>>(d_counter, i);
  cucheck(cudaGetLastError());
  // NOTE(review): this final D2H copy is left in flight — the caller must
  // synchronize streams[i] before reading h_counter[i].
  cucheck(cudaMemcpyAsync(h_counter + i, d_counter + i, sizeof(int),
                          cudaMemcpyDeviceToHost, streams[i]));
}

int main() {
  cucheck(cudaSetDevice(0));
  int n = 1000;
  // One stream per OpenMP thread (capped elsewhere by MAX_NSTREAMS,
  // presumably — the rest of main() is truncated in this view).
  int num_streams = 32;
#pragma omp parallel
  {
#pragma omp master
    { num_streams = omp_get_num_threads(); }
  }
  // (source truncated here in the original; prefix kept verbatim)
  std::cout<<"Using "<