/** @file bunch-ctx.cu implementation of bunch context, mostly per-device
    functions */

// NOTE(review): the <...> spans below (system header names, layptr<> template
// arguments, and "&sector_layer" — which had been mangled into '§or_layer'
// via the "&sect" HTML entity) were eaten by a formatting pass. The system
// header names on the bare #include lines must be restored from the original.
//#include
#include
#include
//#include
//#include
#include
#include
#include
//#include
//#include
//#include

using namespace std;

#include "bunch-ctx.h"
#include "global-ctx.h"
#include "data.h"
#include "dynthrust.h"
#include "options.h"
#include "sector-layer.h"
#include "triplet-finder.h"
#include "util.h"
#include "vec.h"

#include "data.cu.h"
#include "sector-layer.cu.h"
#include "track.cu.h"
#include "triplet.cu.h"

#include "bunch-ctx-internal.h"

/** per-device global context, set from the host side */
extern __constant__ GlobalCtx global_ctx_g;
//extern __device__ GlobalCtx global_ctx_g;
/** per-device copy of all sector-layers */
extern __constant__ SectorLayer sector_layers_g[NSECTOR_LAYERS];

/** device-side angle constants */
__constant__ float max_phis_g[3][3] = MAX_PHIS;
/** device-side angle constants; no initializer here, so presumably filled in
    from the host via cudaMemcpyToSymbol — TODO confirm */
__constant__ float min_cos_phis_g[3][3];

// kernels for dynamic parallelism and host streams
#include "bunch-ctx-kernels.cu.h"

#if WITH_DYN_PAR
// functions for dynamic parallelism in a separate file
#include "bunch-ctx-dyn.cu.h"
#endif

// functions for single thread block in a separate file
#include "bunch-ctx-tblock.cu.h"

// functions for single host stream in a separate file
//#include "bunch-ctx-hstream.cu.h"

// kernel thread functions

/** builds the per-tube hit-count histogram; one thread per hit
    @param ihit global hit index; out-of-range threads return immediately */
__device__ void BunchCtx::tube_hit_histo_tkfun(int ihit) {
  if(ihit >= nhits)
    return;
  atomicAdd(&tube_hit_counts[hits[ihit].tube_id], 1);
}  // tube_hit_histo_tkfun

/** scatters hits into tube-grouped order; one thread per hit.
    Assumes tube_hit_starts holds the exclusive prefix sums of the histogram
    and tube_hit_counts has been reset to zero beforehand, so the atomicAdd
    yields each hit's offset inside its tube's segment — TODO confirm against
    the calling kernel */
__device__ void BunchCtx::tube_hit_group_tkfun(int ihit) {
  if(ihit >= nhits)
    return;
  int tube_id = hits[ihit].tube_id;
  int pos = tube_hit_starts[tube_id] + atomicAdd(&tube_hit_counts[tube_id], 1);
  tube_hits[pos] = hits[ihit];
}  // tube_hit_group_tkfun

/** compacts the ids of tubes that have at least one hit into itubes[];
    one thread per tube. Tube 0 is skipped (presumably a dummy/invalid tube
    id — TODO confirm). Uses a warp-aggregated atomic for the output cursor. */
__device__ void BunchCtx::nz_tubes_tkfun(int itube) {
  const GlobalCtx *g = &global_ctx_g;
  if(itube == 0 || itube >= g->ntubes)
    return;
  if(tube_hit_counts[itube] > 0) {
    int pos = atomicAggInc(&nitubes);
    itubes[pos] = itube;
  }
}  // nz_tubes_tkfun

/** builds triplets around pivot hits in straight tubes; one thread per
    pivot hit. Good triplets are appended to their tube's pivot range. */
__device__ void BunchCtx::find_triplets_tkfun(int ihit) {
  const GlobalCtx *g = &global_ctx_g;
  if(ihit >= npivot_hits)
    return;
  Triplet triplet;
  int itube = pivot_hits[ihit].tube_id;
  if(g->d_tubes[itube].dir.z != 1.0f) {
    // do not handle pivots in skewed tubes
    return;
  }
  triplet.set_pivot_hit(pivot_hits[ihit], g->d_tubes);
  triplet.add_neighbors
    (g->d_nneighbors, g->d_neighbor_tubes, g->neighbor_pitch,
     tube_hits, tube_hit_starts, g->d_tubes);
  // check whether the triplet is good
  if(triplet.is_good()) {
    // add triplet to a range, using a warp-aggregated atomic
    int irange = g->d_tubes[itube].pivot_range_id;
    int itriplet = atomicAggInc(ntriplets, irange);
    // NOTE(review): capacity check added; it was missing here although
    // find_half_skewlets_tkfun guards the analogous store, so an overflow
    // would have written past the end of the range
    if(itriplet < g->max_ntriplets) {
      triplet.save_cms();
      triplets[irange * g->max_ntriplets + itriplet] = triplet;
    }
  }
}  // find_triplets_tkfun

/** builds half-skewlets around pivot hits in skewed tubes; one thread per
    skewed hit. Unlike triplets, every half-skewlet is kept. */
__device__ void BunchCtx::find_half_skewlets_tkfun(int ihit) {
  const GlobalCtx *g = &global_ctx_g;
  if(ihit >= nskewed_hits)
    return;
  HalfSkewlet half_skewlet;
  int itube = skewed_hits[ihit].tube_id;
  half_skewlet.set_pivot_hit(skewed_hits[ihit], g->d_tubes);
  half_skewlet.add_neighbors
    (g->d_nneighbors, g->d_neighbor_tubes, g->neighbor_pitch,
     tube_hits, tube_hit_starts, g->d_tubes);
  // the triplet is always good
  half_skewlet.save_cms();
  int irange = g->d_tubes[itube].skewed_range_id;
  int ihalf_skewlet = atomicAggInc(nhalf_skewlets, irange);
  if(ihalf_skewlet < g->max_nskewlets)
    half_skewlets[irange * g->max_nskewlets + ihalf_skewlet] = half_skewlet;
}  // find_half_skewlets_tkfun

/** pairs half-skewlets of adjacent ranges (inner range irange, outer range
    irange + 1) and records pairs that pass cheap filters for the later,
    more expensive POCA test; one thread per (ihalf, jhalf) combination */
__device__ void BunchCtx::combine_half_skewlets_tkfun
(int irange, int ihalf, int jhalf) {
  const GlobalCtx *g = &global_ctx_g;
  // inner (lower) range -> x, outer (upper) range -> y
  // check thread ranges
  int ninner = nhalf_skewlets[irange];
  int nouter = nhalf_skewlets[irange + 1];
  if(ihalf >= ninner || jhalf >= nouter)
    return;
  HalfSkewlet inner_half = half_skewlets
    [irange * g->max_nskewlets + ihalf];
  HalfSkewlet outer_half = half_skewlets
    [(irange + 1) * g->max_nskewlets + jhalf];
  // hit count test
  if(inner_half.nhits + outer_half.nhits < 4)
    return;
  // time test: the t0 intervals of both halves must overlap
  if(inner_half.min_t0() > outer_half.max_t0() ||
     inner_half.max_t0() < outer_half.min_t0())
    return;
  // CMS test: centers of mass must be close enough (distance^2 <= 100)
  fvec3 inner_cms = inner_half.cms(), outer_cms = outer_half.cms();
  if(dist2(inner_cms, outer_cms) > 100.0f)
    return;
  // TODO: a fast rejection of half-skewlet pairs from different sectors was
  // considered here but is not enabled; it wasn't in the original code
  // else skewlet halfs must be considered for poca test
  int ipair = atomicAggInc(&npre_poca_pairs[irange]);
  if(ipair < g->max_nskewlets) {
    pre_poca_pairs[(irange * g->max_nskewlets + ipair) * 2 + 0] = ihalf;
    pre_poca_pairs[(irange * g->max_nskewlets + ipair) * 2 + 1] = jhalf;
  }
}  // combine_half_skewlets_tkfun

/** runs the point-of-closest-approach test on a pre-filtered half-skewlet
    pair and stores surviving pairs as full skewlets; one thread per pair
    @param with_bins when true, the new skewlet is also pushed onto its
           spatial bin's intrusive list (skewlet_bins / Skewlet::inext) */
__device__ void BunchCtx::poca_test_tkfun(int irange, int ipair,
                                          bool with_bins) {
  const GlobalCtx *g = &global_ctx_g;
  int npairs = npre_poca_pairs[irange];
  if(ipair >= npairs)
    return;
  int *ppair = &pre_poca_pairs[(irange * g->max_nskewlets + ipair) * 2];
  int ihalf = ppair[0], jhalf = ppair[1];
  HalfSkewlet inner_half = half_skewlets[irange * g->max_nskewlets + ihalf];
  HalfSkewlet outer_half =
    half_skewlets[(irange + 1) * g->max_nskewlets + jhalf];
  fvec3 inner_cms = inner_half.cms(), outer_cms = outer_half.cms();
  fvec3 inner_dir = g->d_tubes[inner_half.pivot_tube_id].dir;
  fvec3 outer_dir = g->d_tubes[outer_half.pivot_tube_id].dir;
  // POCA of the two tube-direction lines through the halves' CMS points
  fvec3 poca = find_crossing(inner_cms, inner_dir, outer_cms, outer_dir);
  // the crossing must lie inside the z overlap of both halves
  float zmin = max(inner_half.zmin, outer_half.zmin);
  float zmax = min(inner_half.zmax, outer_half.zmax);
  if(poca.z <= zmin || poca.z >= zmax)
    return;
  // store the skewlet, using a warp-aggregated atomic for the cursor
  int iskewlet = atomicAggInc(&nskewlets[irange]);
  int skewlet_ind = irange * g->max_nskewlets + iskewlet;
  if(iskewlet < g->max_nskewlets) {
    Skewlet skewlet(inner_half);
    skewlet.add_triplet(outer_half);
    // HACK: store poca in sum_hit_pos
    skewlet.sum_hit_pos = poca;
    // assign the skewlet to bin if binning is enabled
    if(with_bins) {
      int sector = g->d_tubes[skewlet.pivot_tube_id].sector;
      int bin = g->d_skewlet_bin_layers[irange * g->nsectors + sector]
        .point_bin(skewlet.sum_hit_pos.xy());
      // atomically push onto the bin's intrusive singly-linked list
      skewlet.inext = atomicExch(&skewlet_bins[bin], iskewlet);
    }
    // store skewlet's SoA data (coalesced access in later kernels)
    poca_xs[skewlet_ind] = poca.x;
    poca_ys[skewlet_ind] = poca.y;
    min_t0s[skewlet_ind] = skewlet.min_t0();
    skewlets[skewlet_ind] = skewlet;
  }
}  // poca_test_tkfun

/** combines triplets from an inner and an outer range into track candidates;
    one thread per (tinner, touter) combination
    @param max_phi NOTE: despite the name, this holds cos(actual max angle);
           the dot-product test below avoids evaluating any trig functions */
__device__ void BunchCtx::combine_triplets_tkfun
(int inner_id, int outer_id, float max_phi, int tinner, int touter) {
  const GlobalCtx *g = &global_ctx_g;
  // inner -> x, outer -> y
  int inner_start = inner_id * g->max_ntriplets;
  int outer_start = outer_id * g->max_ntriplets;
  int iinner = inner_start + tinner;
  int iouter = outer_start + touter;
  int inner_end = inner_start + ntriplets[inner_id];
  int outer_end = outer_start + ntriplets[outer_id];
  if(iinner >= inner_end || iouter >= outer_end)
    return;
  // check times
  // TODO: not sure if the second comparison should be < and not >
  Triplet inner_triplet = triplets[iinner];
  Triplet outer_triplet = triplets[iouter];
  if(inner_triplet.min_t0() > outer_triplet.max_t0() ||
     inner_triplet.max_t0() < outer_triplet.min_t0())
    return;
  // check angles: reject if cos(angle between the 2D CMS vectors) < max_phi,
  // i.e. dot(u, v) < max_phi * |u| * |v|
  fvec3 inner_cms = inner_triplet.cms(), outer_cms = outer_triplet.cms();
  fvec2 inner_cms2 = inner_cms.xy(), outer_cms2 = outer_cms.xy();
  if(dot(inner_cms2, outer_cms2) <
     max_phi * sqrtf(len2(inner_cms2) * len2(outer_cms2)))
    return;
  // output track candidate; vertex/momentum temporarily hold the CMS points
  Track track;
  track.wstart = max(inner_triplet.min_accept_window(),
                     outer_triplet.min_accept_window());
  track.wend = min(inner_triplet.max_accept_window(),
                   outer_triplet.max_accept_window());
  track.t0 = max(inner_triplet.min_t0(), outer_triplet.min_t0());
  track.vertex = inner_cms;
  track.momentum = outer_cms;
  track.inner_tube_id = inner_triplet.pivot_tube_id;
  track.outer_tube_id = outer_triplet.pivot_tube_id;
  int track_pos = atomicAggInc(&ncand_tracks);
  if(track_pos < g->max_ntracks)
    cand_tracks[track_pos] = track;
}  // combine_triplets_tkfun

/** fits a circle through the origin and the two CMS points stored in the
    candidate track (vertex and momentum fields); one thread per track.
    The circle's radius equals the distance of its center from the origin,
    i.e. the track is assumed to pass through the beam line — TODO confirm. */
__device__ void compute_circles_tkfun
(layptr<Track> cand_tracks, int ncand_tracks, int itrack) {
  if(itrack >= ncand_tracks)
    return;
  layptr<Track> track = cand_tracks + itrack;
  fvec3 inner_cms = track->vertex, outer_cms = track->momentum;
  float a = inner_cms.x, b = inner_cms.y, c = outer_cms.x, d = outer_cms.y;
  // NOTE: this optimization loses 2 tracks (16404 -> 16402)
  // NOTE(review): ibcad is +-inf when b*c == a*d (collinear CMS points), and
  // the is_clockwise test divides by c; both cases silently produce inf/NaN
  float ibcad = 1.0f / (b * c - a * d);
  float px = 0.5f * (b * c * c - a * a * d - b * b * d + b * d * d) * ibcad;
  float py = 0.5f * (a * a * c + b * b * c - a * c * c - a * d * d) * ibcad;
  track->center = fvec3::from(px, py, 0);
  track->radius = len((fvec3)track->center);
  track->is_clockwise = b > a * d / c;
  if(c < 0.0f)
    track->is_clockwise = !track->is_clockwise;
}  // compute_circles_tkfun

/** evaluates candidate tracks against straight-tube hits, staging tubes in
    shared memory; one thread per track, entire thread block cooperates on
    the staging loops. NOTE: no itrack bounds check — the launcher is
    presumed to size the grid so every itrack is valid (an early return would
    break the __syncthreads() barriers below) — TODO confirm */
__device__ void BunchCtx::eval_tracks_tubes_shared_tkfun(int itrack) {
  const GlobalCtx *g = &global_ctx_g;
  // track and time window
  Track track = cand_tracks[itrack];
  layptr<Tube> tubes = g->d_tubes;
  // row and sector ranges relevant to this track
  int inner_row = g->d_tubes[track.inner_tube_id].row;
  int outer_row = g->d_tubes[track.outer_tube_id].row;
  int inner_sector = g->d_tubes[track.inner_tube_id].sector;
  int outer_sector = g->d_tubes[track.outer_tube_id].sector;
  int min_row = min(inner_row, outer_row) - 1;
  int max_row = max(inner_row, outer_row) + 2;
  int min_sector = min(inner_sector, outer_sector);
  int max_sector = max(inner_sector, outer_sector);
  // check all tubes against this track
  const float max_hit_dist = 0.5f;
  __shared__ Tube l_tubes[SHARED_NTUBES];
  __shared__ int l_tube_hit_starts[SHARED_NTUBES];
  __shared__ int l_tube_hit_ends[SHARED_NTUBES];
  for(int ij1tube = 0; ij1tube < nitubes; ij1tube += SHARED_NTUBES) {
    int l_tube_end = min(SHARED_NTUBES, nitubes - ij1tube);
    __syncthreads();
    // cooperatively stage the next batch of non-empty tubes
    for(int lijtube = threadIdx.x; lijtube < l_tube_end;
        lijtube += blockDim.x) {
      int ijtube = ij1tube + lijtube;
      int jtube = itubes[ijtube];
      // load data from main memory
      Tube tube = tubes[jtube];
      int tube_hit_start = tube_hit_starts[jtube];
      int tube_hit_end = tube_hit_starts[jtube + 1];
      l_tubes[lijtube] = tube;
      l_tube_hit_starts[lijtube] = tube_hit_start;
      l_tube_hit_ends[lijtube] = tube_hit_end;
    }
    __syncthreads();
    for(int ljtube = 0; ljtube < l_tube_end; ljtube++) {
      int row = l_tubes[ljtube].row, sector = l_tubes[ljtube].sector;
      float dirz = l_tubes[ljtube].dir.z;
      // only straight tubes within the track's row/sector window
      if(row < min_row || row > max_row ||
         sector < min_sector || sector > max_sector || dirz != 1.0f)
        continue;
      int jhit_start = l_tube_hit_starts[ljtube];
      int jhit_end = l_tube_hit_ends[ljtube];
      int ntube_hits = 0;
      for(int jhit = jhit_start; jhit < jhit_end; jhit++) {
        float hit_t = tube_hits[jhit].t;
        // check time
        if(track.wstart >= hit_t || hit_t >= track.wend)
          continue;
        // check position; at most one hit per tube is accepted
        if(track.hit_in_range(l_tubes[ljtube].pos, max_hit_dist)) {
          ntube_hits++;
          track.add_hit_time(hit_t);
          break;
        }
      }  // for (each jtube hit)
      if(ntube_hits > 0)
        track.nhits++;
    }
  }  // for(each jtube)
  // NOTE(review): the original followed this with a redundant duplicate
  // store of the nhits field; the full-struct store already covers it
  cand_tracks[itrack] = track;
}  // eval_tracks_tubes_shared_tkfun

/** evaluates candidate tracks against straight-tube hits by rasterizing the
    track circle across sector-layers; one thread per track */
__device__ void BunchCtx::eval_tracks_tubes_raster_tkfun(int itrack) {
  const GlobalCtx *g = &global_ctx_g;
  // track and time window
  if(itrack >= ncand_tracks)
    return;
  Track track = cand_tracks[itrack];
  layptr<Tube> tubes = g->d_tubes;
  // row and sector ranges relevant to this track
  int inner_row = tubes[track.inner_tube_id].row;
  int outer_row = tubes[track.outer_tube_id].row;
  int inner_sector = tubes[track.inner_tube_id].sector;
  int outer_sector = tubes[track.outer_tube_id].sector;
  int min_row = min(inner_row, outer_row) - 1;
  int max_row = max(inner_row, outer_row) + 2;
  int min_sector = min(inner_sector, outer_sector);
  int max_sector = max(inner_sector, outer_sector);
  // go through the sector-layers related to this track
  for(int row = min_row; row <= max_row; row++)
    for(int sector = min_sector; sector <= max_sector; sector++) {
      int row_index = row;
      int isector_layer = row_index * g->nsectors + sector;
      const SectorLayer &sector_layer = sector_layers_g[isector_layer];
      if(sector_layer.is_skewed)
        continue;
      // get the tubes intersected by the track's circle
      range2 sl_tubes = sector_layer.intersect_ring_tubes
        (track.center.xy(), track.radius);
      if(sl_tubes.is_empty())
        continue;
      for(range2_iter i = sl_tubes.begin(); i != sl_tubes.end(); ++i) {
        // process current tube
        int jtube = *i;
        // check whether the hit sign is in range (the hit is in range)
        fvec3 pos = tubes[jtube].pos;
        if(!track.hit_sign_in_range(pos))
          continue;
        // iterate through hits
        int jhit_start = tube_hit_starts[jtube];
        int jhit_end = tube_hit_starts[jtube + 1];
        int ntube_hits = 0;
        for(int jhit = jhit_start; jhit < jhit_end; jhit++) {
          float hit_t = tube_hits[jhit].t;
          // check time
          if(track.wstart >= hit_t || hit_t >= track.wend)
            continue;
          // no need to check position, due to algorithm used
          ntube_hits++;
          track.add_hit_time(hit_t);
        }  // for each jtube hit
        if(ntube_hits > 0)
          track.nhits++;
      }  // for each tube
    }  // for each sector-layer
  // only nhits changed besides the hit times folded into the local copy
  cand_tracks[itrack].nhits = track.nhits;
}  // eval_tracks_tubes_raster_tkfun

/** evaluates candidate tracks against all skewlets, staging skewlet SoA data
    in shared memory; one thread per track. There is deliberately no early
    return for itrack >= ncand_tracks: all threads of the block must reach
    the __syncthreads() barriers, so out-of-range threads idle through the
    loops instead. NOTE(review): the initial read of cand_tracks[itrack] is
    therefore also performed by out-of-range threads — presumably the array
    is sized to cover the whole grid — TODO confirm. */
__device__ void BunchCtx::eval_tracks_skewlets_all_tkfun(int itrack) {
  const GlobalCtx * const g = &global_ctx_g;
  // track and time window
  Track track = cand_tracks[itrack];
  // check skewlets against this track
  const float max_hit_dist = 0.5f;
  // skewlets staged in shared memory
  __shared__ float l_min_t0s[SHARED_NSKEWLETS];
  __shared__ fvec2 l_pocas[SHARED_NSKEWLETS];
  // check all skewlet ranges against this track
  for(int iskewlet_range = 0; iskewlet_range < g->nskewlet_ranges;
      iskewlet_range++) {
    const int cur_nskewlets = nskewlets[iskewlet_range];
    if(cur_nskewlets == 0)
      continue;
    int range_hits = 0;
    for(int i1skewlet = 0; i1skewlet < cur_nskewlets;
        i1skewlet += SHARED_NSKEWLETS) {
      const int skewlet_end = min(cur_nskewlets, i1skewlet + SHARED_NSKEWLETS);
      const int l_nskewlets = skewlet_end - i1skewlet;
      __syncthreads();
      // cooperatively load the next batch of skewlet SoA data
      for(int iiskewlet = threadIdx.x; iiskewlet < l_nskewlets;
          iiskewlet += blockDim.x) {
        int skewlet_ind =
          iskewlet_range * g->max_nskewlets + i1skewlet + iiskewlet;
        float min_t0 = min_t0s[skewlet_ind];
        fvec2 poca = fvec2::from(poca_xs[skewlet_ind], poca_ys[skewlet_ind]);
        l_min_t0s[iiskewlet] = min_t0;
        l_pocas[iiskewlet] = poca;
      }  // for(iiskewlet loaded to SM)
      __syncthreads();
      // TODO: exit early if every track has at least one hit
      if(itrack < ncand_tracks) {
        // test against skewlets
#pragma unroll 4
        for(int iiskewlet = 0; iiskewlet < l_nskewlets; iiskewlet++) {
          if(track.wstart >= l_min_t0s[iiskewlet] ||
             l_min_t0s[iiskewlet] >= track.wend)
            continue;
          if(track.hit_in_range(l_pocas[iiskewlet], max_hit_dist))
            range_hits++;
        }  // for(iiskewlet in SM)
      }
    }  // for(i1skewlet)
    // one matching skewlet is enough for the range to count as a hit
    if(range_hits > 0)
      track.nhits++;
  }  // for(iskewlet_range)
  // NOTE(review): guard added — out-of-range threads participate in the
  // barriers above but must not store past the end of cand_tracks
  if(itrack < ncand_tracks)
    cand_tracks[itrack].nhits = track.nhits;
}  // eval_tracks_skewlets_all_tkfun

/** evaluates candidate tracks against skewlets via spatial bins: intersects
    the track circle with each skewlet bin layer and walks only the intrusive
    per-bin skewlet lists; one thread per track */
__device__ void BunchCtx::eval_tracks_skewlets_bins_tkfun(int itrack) {
  const GlobalCtx * const g = &global_ctx_g;
  if(itrack >= ncand_tracks)
    return;
  // track and time window
  Track track = cand_tracks[itrack];
  // check skewlets against this track
  const float max_hit_dist = 0.5f;
  // check all skewlet ranges against this track
  for(int iskewlet_range = 0; iskewlet_range < g->nskewlet_ranges;
      iskewlet_range++) {
    int range_hits = 0;
    // decouple sectors from bins (i.e., different threads can
    // process different sectors)
    int sector = 0;
    for(; sector < g->nsectors;) {
      // find a sector whose bin intersection with the circle is non-empty;
      // note sector ends up one past the sector that produced sl_bins
      range2 sl_bins = range2(range::empty(), range::empty());
      for(; sector < g->nsectors && sl_bins.is_empty(); sector++) {
        const SectorLayer &sl =
          g->d_skewlet_bin_layers[iskewlet_range * g->nsectors + sector];
        sl_bins = sl.intersect_ring_skewlets
          (track.center.xy(), track.radius);
      }
      if(sl_bins.is_empty())
        continue;
      for(range2_iter ibin = sl_bins.begin(); ibin != sl_bins.end(); ibin++) {
        // walk the bin's intrusive list (inext chain, -1 terminated)
        int bin = *ibin, iskewlet = skewlet_bins[bin];
        while(iskewlet > -1) {
          int skewlet_ind = iskewlet_range * g->max_nskewlets + iskewlet;
          Skewlet skewlet = skewlets[skewlet_ind];
          iskewlet = skewlet.inext;
          float min_t0 = min_t0s[skewlet_ind];
          fvec2 poca = fvec2::from(poca_xs[skewlet_ind], poca_ys[skewlet_ind]);
          if(track.wstart >= min_t0 || min_t0 >= track.wend)
            continue;
          if(!track.hit_in_range(poca, max_hit_dist))
            continue;
          range_hits++;
        }  // while(iskewlet)
      }  // for(ibin)
    }  // for(sector)
    if(range_hits > 0)
      track.nhits++;
  }  // for(iskewlet_range)
  cand_tracks[itrack].nhits = track.nhits;
}  // eval_tracks_skewlets_bins_tkfun