Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,11 @@ class PerepelkinIConvexHullGrahamScanOMP : public BaseTask {

static size_t FindPivotParallel(const std::vector<std::pair<double, double>> &pts);
static void ParallelSort(std::vector<std::pair<double, double>> &data, const std::pair<double, double> &pivot);

static void DataPartitioning(size_t total_size, const int &threads, std::vector<int> &start);
static void HullConstruction(std::vector<std::pair<double, double>> &hull,
const std::vector<std::pair<double, double>> &pts,
const std::pair<double, double> &pivot);
static bool AngleCmp(const std::pair<double, double> &a, const std::pair<double, double> &b,
const std::pair<double, double> &pivot);
static double Orientation(const std::pair<double, double> &p, const std::pair<double, double> &q,
Expand Down
97 changes: 61 additions & 36 deletions tasks/perepelkin_i_convex_hull_graham_scan/omp/src/ops_omp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,44 +47,41 @@ bool PerepelkinIConvexHullGrahamScanOMP::RunImpl() {

// Sequential hull construction
std::vector<std::pair<double, double>> hull;
hull.reserve(pts.size() + 1);

hull.push_back(pivot);
hull.push_back(pts[0]);

for (size_t i = 1; i < pts.size(); i++) {
while (hull.size() >= 2 && Orientation(hull[hull.size() - 2], hull[hull.size() - 1], pts[i]) <= 0) {
hull.pop_back();
}

hull.push_back(pts[i]);
}
HullConstruction(hull, pts, pivot);

GetOutput() = std::move(hull);
return true;
}

size_t PerepelkinIConvexHullGrahamScanOMP::FindPivotParallel(const std::vector<std::pair<double, double>> &pts) {
size_t pivot_idx = 0;
const int threads = std::min(ppc::util::GetNumThreads(), static_cast<int>(pts.size()));
std::vector<size_t> local_idx(threads, 0);

#pragma omp parallel default(none) shared(pts, pivot_idx) num_threads(ppc::util::GetNumThreads())
#pragma omp parallel default(none) shared(pts, local_idx) num_threads(threads)
{
size_t local_idx = pivot_idx;
int tid = omp_get_thread_num();
size_t local = 0;

#pragma omp for nowait
for (size_t i = 1; i < pts.size(); i++) {
if (pts[i].second < pts[local_idx].second ||
(pts[i].second == pts[local_idx].second && pts[i].first < pts[local_idx].first)) {
local_idx = i;
if (pts[i].second < pts[local].second ||
(pts[i].second == pts[local].second && pts[i].first < pts[local].first)) {
local = i;
}
}

#pragma omp critical
{
if (pts[local_idx].second < pts[pivot_idx].second ||
(pts[local_idx].second == pts[pivot_idx].second && pts[local_idx].first < pts[pivot_idx].first)) {
pivot_idx = local_idx;
}
local_idx[tid] = local;
}

// Sequential reduction
size_t pivot_idx = 0;

for (int tid = 0; tid < threads; tid++) {
size_t i = local_idx[tid];

if (pts[i].second < pts[pivot_idx].second ||
(pts[i].second == pts[pivot_idx].second && pts[i].first < pts[pivot_idx].first)) {
pivot_idx = i;
}
}

Expand All @@ -93,20 +90,14 @@ size_t PerepelkinIConvexHullGrahamScanOMP::FindPivotParallel(const std::vector<s

void PerepelkinIConvexHullGrahamScanOMP::ParallelSort(std::vector<std::pair<double, double>> &data,
const std::pair<double, double> &pivot) {
size_t n = data.size();
int threads = ppc::util::GetNumThreads();

if (n < 10000) {
std::ranges::sort(data, [&](const auto &a, const auto &b) { return AngleCmp(a, b, pivot); });
return;
}
const int threads = std::min(ppc::util::GetNumThreads(), static_cast<int>(data.size()));

// Partitioning
std::vector<int> start(threads + 1);
for (int i = 0; i <= threads; i++) {
start[i] = static_cast<int>(i * n / threads);
}
DataPartitioning(data.size(), threads, start);

#pragma omp parallel default(none) shared(data, start, pivot) num_threads(ppc::util::GetNumThreads())
// Parallel local sorting
#pragma omp parallel default(none) shared(data, start, pivot) num_threads(threads)
{
int tid = omp_get_thread_num();
std::sort(data.begin() + start[tid], data.begin() + start[tid + 1],
Expand All @@ -115,7 +106,7 @@ void PerepelkinIConvexHullGrahamScanOMP::ParallelSort(std::vector<std::pair<doub

// Merge sorted segments
for (int size = 1; size < threads; size *= 2) {
#pragma omp parallel for default(none) shared(data, start, threads, size, pivot) num_threads(ppc::util::GetNumThreads())
#pragma omp parallel for default(none) shared(data, start, threads, size, pivot) num_threads(threads)
for (int i = 0; i < threads; i += 2 * size) {
if (i + size >= threads) {
continue;
Expand All @@ -131,6 +122,40 @@ void PerepelkinIConvexHullGrahamScanOMP::ParallelSort(std::vector<std::pair<doub
}
}

void PerepelkinIConvexHullGrahamScanOMP::DataPartitioning(size_t total_size, const int &threads,
                                                          std::vector<int> &start) {
  // Splits [0, total_size) into `threads` contiguous chunks whose sizes differ
  // by at most one element. `start` must hold threads + 1 entries; on return,
  // chunk t owns the half-open index range [start[t], start[t + 1]).
  //
  // Guard: callers clamp threads to min(GetNumThreads(), size), which can be 0
  // for empty input — the divisions below would then be UB (division by zero).
  if (threads <= 0) {
    if (!start.empty()) {
      start.back() = static_cast<int>(total_size);
    }
    return;
  }

  const size_t base = total_size / static_cast<size_t>(threads);  // minimum chunk size
  const size_t rem = total_size % static_cast<size_t>(threads);   // first `rem` chunks get one extra element

  size_t offset = 0;

  for (int i = 0; i < threads; i++) {
    start[i] = static_cast<int>(offset);

    // std::cmp_less compares the signed index against the unsigned remainder
    // without the implicit signed/unsigned conversion pitfalls.
    const size_t extra = std::cmp_less(i, rem) ? 1 : 0;
    offset += base + extra;
  }

  // Sentinel: the end of the last chunk is the total element count.
  start[threads] = static_cast<int>(total_size);
}

void PerepelkinIConvexHullGrahamScanOMP::HullConstruction(std::vector<std::pair<double, double>> &hull,
                                                          const std::vector<std::pair<double, double>> &pts,
                                                          const std::pair<double, double> &pivot) {
  // Graham-scan stack phase: appends the convex hull vertices to `hull`.
  // `pts` is assumed to be sorted by polar angle around `pivot` (callers run
  // ParallelSort first — TODO confirm no other call sites skip the sort).
  hull.reserve(pts.size() + 1);  // worst case: pivot + every sorted point survives

  hull.push_back(pivot);

  // Guard: with no candidate points the hull degenerates to the pivot alone;
  // the previous unconditional pts[0] access was UB on an empty vector.
  if (pts.empty()) {
    return;
  }

  hull.push_back(pts[0]);

  for (size_t i = 1; i < pts.size(); i++) {
    // Pop while the top two stack points and pts[i] fail to make a strict
    // left turn (Orientation <= 0 also discards collinear points).
    while (hull.size() >= 2 && Orientation(hull[hull.size() - 2], hull[hull.size() - 1], pts[i]) <= 0) {
      hull.pop_back();
    }

    hull.push_back(pts[i]);
  }
}

double PerepelkinIConvexHullGrahamScanOMP::Orientation(const std::pair<double, double> &p,
const std::pair<double, double> &q,
const std::pair<double, double> &r) {
Expand Down
Loading