Commit 721d7db7 authored by 数学の武士
Code design changed

parent 304cd577
@@ -36,10 +36,14 @@ if(INCLUDE_CUDA)
endif(INCLUDE_CUDA)
set(SOURCES
buffer.cpp
mf-utils.cpp
memory-faucet.cpp
)
set(HEADER
buffer.h
mf-utils.h
memory-faucet.h
)
......
#include "buffer.h"
#include "MemoryProcessing.h"
#ifdef INCLUDE_CUDA
#include "MemoryProcessing.cuh"
#endif
// void dyn_dispatch_buffer::reallocate(buffer_base* obj, const size_t required_size)
// {
// obj->reallocate(required_size);
// }
// void dyn_dispatch_buffer::deallocate(buffer_base* obj)
// {
// obj->deallocate();
// }
buffer_base::buffer_base()
{
ptr = nullptr;
allocated_size = 0;
scalar_size = 0;
is_free = true;
id = -1;
// ! resolve
mem = MemType::CPU;
}
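// Reallocate the underlying storage to hold required_size bytes, dispatching on
// the runtime MemType tag; the GPU branch is compiled only when INCLUDE_CUDA is defined.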
void buffer_base::reallocate(const size_t required_size)
{
if(mem == MemType::CPU)
memproc::realloc<MemType::CPU>((void *&)ptr, allocated_size, required_size);
#ifdef INCLUDE_CUDA
else if(mem == MemType::GPU)
memproc::realloc<MemType::GPU>((void *&)ptr, allocated_size, required_size);
#endif
}
void buffer_base::deallocate()
{
if(mem == MemType::CPU)
memproc::dealloc<MemType::CPU>((void *&)ptr, allocated_size);
#ifdef INCLUDE_CUDA
else if(mem == MemType::GPU)
memproc::dealloc<MemType::GPU>((void *&)ptr, allocated_size);
#endif
}
buffer_base::~buffer_base()
{
deallocate();
ptr = nullptr;
scalar_size = 0;
is_free = true;
id = -1;
}
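// Copy construction allocates fresh storage of the same size on the same memory type
// and copies the bookkeeping fields; the buffer contents and scalar_size are not copied.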
buffer_base::buffer_base(const buffer_base& other)
{
mem = other.mem;
allocated_size = 0;
reallocate(other.get_size());
is_free = other.get_status();
id = other.get_id();
}
buffer_base::buffer_base(buffer_base&& other)
{
mem = other.mem;
ptr = other.ptr;
allocated_size = other.allocated_size;
scalar_size = other.scalar_size;
is_free = other.is_free;
id = other.id;
other.ptr = nullptr;
other.allocated_size = 0;
other.scalar_size = 0;
other.is_free = false;
other.id = -1;
}
buffer_base& buffer_base::operator=(const buffer_base& other)
{
if (this == &other)
return *this;
mem = other.mem;
reallocate(other.get_size());
is_free = other.get_status();
id = other.get_id();
return *this;
}
buffer_base& buffer_base::operator=(buffer_base&& other)
{
if (this != &other)
{
mem = other.mem;
deallocate();
ptr = other.ptr;
allocated_size = other.allocated_size;
scalar_size = other.scalar_size;
is_free = other.is_free;
id = other.id;
other.ptr = nullptr;
other.allocated_size = 0;
other.scalar_size = 0;
other.is_free = false;
other.id = -1;
}
return *this;
}
bool buffer_base::is_available() const
{
return is_free;
}
buffer_base::buffer_base(const size_t required_size, const MemType mem_in)
{
mem = mem_in;
ptr = nullptr;
allocated_size = 0;
reallocate(required_size);
scalar_size = 0;
is_free = false;
}
void* buffer_base::get_ptr()
{
return ptr;
}
bool buffer_base::get_status() const
{
return is_free;
}
size_t buffer_base::get_size() const
{
return allocated_size;
}
int buffer_base::get_id() const
{
return id;
}
void buffer_base::set_allocated_size(const size_t required_size)
{
allocated_size = required_size;
}
void buffer_base::set_null_pointer()
{
ptr = nullptr;
}
void buffer_base::set_status(const bool status)
{
is_free = status;
}
void buffer_base::set_id(const int idx)
{
id = idx;
}
// template< MemType mem >
// void buffer<mem>::reallocate(const size_t required_size)
// {
// memproc::realloc<mem>((void *&)ptr, allocated_size, required_size);
// }
// template< MemType mem >
// void buffer<mem>::deallocate()
// {
// memproc::deallocate<mem>((void *&)ptr, allocated_size);
// }
// template class buffer<MemType::CPU>;
// #ifdef INCLUDE_CUDA
// template class buffer<MemType::GPU>;
// #endif
\ No newline at end of file
#pragma once
#include "TemplateParameters.h"
#include <cstdlib>
class buffer_base
{
public:
void *ptr;
size_t allocated_size;
size_t scalar_size;
bool is_free;
int id;
MemType mem;
buffer_base(); // Done
buffer_base(const size_t required_size, const MemType mem); // Done
buffer_base(const buffer_base& other); // Done
buffer_base(buffer_base&& other); // Done
~buffer_base(); // Done
bool is_available() const; // Done
void* get_ptr(); // Done
bool get_status() const; // Done
size_t get_size() const; // Done
int get_id() const; // Done
void set_allocated_size(const size_t required_size); // Done
void set_status(const bool status); // Done
void set_id(const int id); // Done
void set_null_pointer(); // Done
buffer_base& operator=(const buffer_base& other); // Done
buffer_base& operator=(buffer_base&& other); // Done
void reallocate(const size_t required_size);
void deallocate();
};
// namespace dyn_dispatch_buffer
// {
// void reallocate(buffer_base* obj, const size_t required_size);
// void deallocate(buffer_base* obj);
// };
// template< MemType mem >
// class buffer: public buffer_base
// {
// private:
// using buffer_base::ptr;
// using buffer_base::allocated_size;
// using buffer_base::scalar_size;
// using buffer_base::is_free;
// using buffer_base::id;
// public:
// buffer(const buffer<mem>& other) : buffer_base(other) {}
// buffer(buffer<mem>&& other) : buffer_base(other) {}
// buffer() : buffer_base() {}
// ~buffer(){};
// buffer(const size_t required_size) : buffer_base(required_size) {}
// void reallocate(const size_t required_size) override;
// void deallocate() override;
// };
\ No newline at end of file
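// Illustrative usage sketch (hypothetical, not from this commit): direct use of the
// buffer_base interface declared above. It assumes MemType comes from
// TemplateParameters.h and that memproc::realloc/dealloc from MemoryProcessing.h
// behave as the calls in buffer.cpp suggest.
#include "buffer.h"
#include <cstring>

void buffer_base_example()
{
    buffer_base buf(1024, MemType::CPU);            // allocates 1024 bytes of host memory
    std::memset(buf.get_ptr(), 0, buf.get_size());  // the raw pointer is usable immediately
    buf.set_status(true);                           // mark the buffer as free again
}                                                   // ~buffer_base() releases the allocation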
#include "memory-faucet.h"
#include "mf-utils.h"
#include "MemoryProcessing.h"
#ifdef INCLUDE_CUDA
#include "MemoryProcessing.cuh"
#endif
template< MemType mem >
buffer<mem>::buffer()
{
ptr = nullptr;
allocated_size = 0;
scalar_size = 0;
is_free = true;
id = -1;
}
template< MemType mem >
buffer<mem>::~buffer()
{
memproc::dealloc<mem>((void *&)ptr, allocated_size);
}
template< MemType mem >
bool buffer<mem>::is_available() const
{
return is_free;
}
template< MemType mem >
void buffer<mem>::reallocate(const size_t required_size)
{
memproc::realloc<mem>((void *&)ptr, allocated_size, required_size);
}
template< MemType mem >
buffer<mem>::buffer(const size_t required_size)
{
ptr = nullptr;
allocated_size = 0;
reallocate(required_size);
scalar_size = 0;
is_free = false;
}
template< MemType mem >
bool free_size_comparator(const buffer<mem>& obj, const size_t required_size)
{
if(obj.get_status() == false)
return false;
return obj.get_size() <= required_size;
}
template bool free_size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
#ifdef INCLUDE_CUDA
template bool free_size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
#endif
template< MemType mem >
bool size_comparator(const size_t required_size, const buffer<mem>& obj)
{
return required_size < obj.get_size();
}
template bool size_comparator(const size_t required_size, const buffer<MemType::CPU>& obj);
#ifdef INCLUDE_CUDA
template bool size_comparator(const size_t required_size, const buffer<MemType::GPU>& obj);
#endif
template< MemType mem >
void* buffer<mem>::get_ptr()
{
return ptr;
}
template< MemType mem >
bool buffer<mem>::get_status() const
{
return is_free;
}
template< MemType mem >
size_t buffer<mem>::get_size() const
{
return allocated_size;
}
template< MemType mem >
int buffer<mem>::get_id() const
{
return id;
}
template< MemType mem >
void buffer<mem>::set_allocated_size(const size_t required_size)
{
allocated_size = required_size;
}
template< MemType mem >
void buffer<mem>::set_null_pointer()
{
ptr = nullptr;
}
template< MemType mem >
buffer<mem>::buffer(const buffer<mem>& other)
{
allocated_size = 0;
reallocate(other.get_size());
is_free = other.get_status();
id = other.get_id();
}
template< MemType mem >
buffer<mem>::buffer(buffer<mem>&& other)
{
ptr = other.ptr;
allocated_size = other.allocated_size;
scalar_size = other.scalar_size;
is_free = other.is_free;
id = other.id;
other.ptr = nullptr;
other.allocated_size = 0;
other.scalar_size = 0;
other.is_free = false;
other.id = -1;
}
template< MemType mem >
buffer<mem>& buffer<mem>::operator=(const buffer<mem>& other)
{
if (this == &other)
return *this;
reallocate(other.get_size());
is_free = other.get_status();
id = other.get_id();
return *this;
}
template< MemType mem >
buffer<mem>& buffer<mem>::operator=(buffer<mem>&& other)
{
if (this != &other)
{
memproc::dealloc<mem>((void *&)ptr, allocated_size);
ptr = other.ptr;
allocated_size = other.allocated_size;
scalar_size = other.scalar_size;
is_free = other.is_free;
id = other.id;
other.ptr = nullptr;
other.allocated_size = 0;
other.scalar_size = 0;
other.is_free = false;
other.id = -1;
}
return *this;
}
template< MemType mem >
void buffer<mem>::set_status(const bool status)
{
is_free = status;
}
template< MemType mem >
void buffer<mem>::set_id(const int idx)
{
id = idx;
}
template class buffer<MemType::CPU>;
#ifdef INCLUDE_CUDA
template class buffer<MemType::GPU>;
#endif
memory_pipe_base::memory_pipe_base()
{
#ifdef INCLUDE_CUDA
gpu_buff = std::vector<buffer<MemType::GPU> > ();
#endif
cpu_buff = std::vector<buffer<MemType::CPU> > ();
}
memory_pipe_base::~memory_pipe_base()
{
#ifdef INCLUDE_CUDA
gpu_buff.clear();
#endif
cpu_buff.clear();
}
-template< >
-std::vector<buffer<MemType::CPU> >& memory_pipe_base::get_memtyped_vector()
-{
-return cpu_buff;
-}
-#ifdef INCLUDE_CUDA
-template<>
-std::vector<buffer<MemType::GPU> >& memory_pipe_base::get_memtyped_vector()
-{
-return gpu_buff;
-}
-#endif
-template< MemType mem >
-void memory_pipe_base::set_available(const int id)
-{
-get_memtyped_vector<mem>()[id].set_status(true);
-}
-template void memory_pipe_base::set_available<MemType::CPU>(const int id);
-#ifdef INCLUDE_CUDA
-template void memory_pipe_base::set_available<MemType::GPU>(const int id);
-#endif
+template< MemType mem, buf_choose_policy choose_type >
+memory_pipe<mem, choose_type>::memory_pipe()
+{
+buff_vec = std::vector<buffer_base > ();
+}
+template< MemType mem, buf_choose_policy choose_type >
+memory_pipe<mem, choose_type>::~memory_pipe()
+{
+buff_vec.clear();
+}
+template< MemType mem, buf_choose_policy choose_type >
+void memory_pipe<mem, choose_type>::set_available(const int id)
+{
+buff_vec[id].set_status(true);
+}
-template< MemType mem >
-typename std::vector<buffer<mem>>::iterator get_lower_bound(typename std::vector<buffer<mem>>::iterator first,
-typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const buffer<mem>&, const size_t) >& comp)
-{
-typename std::vector<buffer<mem>>::iterator it;
-typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
-count = std::distance(first, last);
-while (count > 0)
-{
-it = first;
-step = count / 2;
-it = std::next(it, step);
-if (comp(*it, value))
-{
-first = ++it;
-count -= step + 1;
-}
-else
-count = step;
-}
-return first;
-}
-template typename std::vector<buffer<MemType::CPU>>::iterator get_lower_bound<MemType::CPU >(typename std::vector<buffer<MemType::CPU>>::iterator first,
-typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::CPU>&, const size_t)>& );
-#ifdef INCLUDE_CUDA
-template typename std::vector<buffer<MemType::GPU>>::iterator get_lower_bound<MemType::GPU >(typename std::vector<buffer<MemType::GPU>>::iterator first,
-typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::GPU>&, const size_t)>& );
-#endif
+template< MemType mem, buf_choose_policy choose_type >
+std::vector<buffer_base >& memory_pipe<mem, choose_type>::get_buff_vec()
+{
+return buff_vec;
+}
template< MemType mem >
typename std::vector<buffer<mem>>::iterator get_upper_bound(typename std::vector<buffer<mem>>::iterator first,
typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<mem>&) >& comp)
{
typename std::vector<buffer<mem>>::iterator it;
typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
count = std::distance(first, last);
while (count > 0)
{
it = first;
step = count / 2;
it = std::next(it, step);
if (!comp(value, *it))
{
first = ++it;
count -= step + 1;
}
else
count = step;
}
return first;
}
-template typename std::vector<buffer<MemType::CPU>>::iterator get_upper_bound<MemType::CPU >(typename std::vector<buffer<MemType::CPU>>::iterator first,
-typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::CPU>&)>& );
-#ifdef INCLUDE_CUDA
-template typename std::vector<buffer<MemType::GPU>>::iterator get_upper_bound<MemType::GPU >(typename std::vector<buffer<MemType::GPU>>::iterator first,
-typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::GPU>&)>& );
-#endif
+template class memory_pipe<MemType::CPU, buf_choose_policy::naive>;
+template class memory_pipe<MemType::CPU, buf_choose_policy::sorted_vec>;
+template class memory_pipe<MemType::CPU, buf_choose_policy::find_best_unsorted>;
+#ifdef INCLUDE_CUDA
+template class memory_pipe<MemType::GPU, buf_choose_policy::naive>;
+template class memory_pipe<MemType::GPU, buf_choose_policy::sorted_vec>;
+template class memory_pipe<MemType::GPU, buf_choose_policy::find_best_unsorted>;
+#endif
template<buf_choose_policy choose_type>
template< MemType mem >
int memory_pipe<choose_type>::get_buffer(const size_t required_size, void ** ptr)
{
std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
const int allocated_buffer_n = buff_vec.size();
for (int i = 0; i < allocated_buffer_n; i++)
{
const bool is_free = buff_vec[i].get_status();
const int avail_size = buff_vec[i].get_size();
if(is_free == true && required_size <= avail_size)
{
(*ptr) = buff_vec[i].get_ptr();
buff_vec[i].set_status(false);
return i;
}
}
buff_vec.push_back(buffer<mem>(required_size));
(*ptr) = buff_vec.back().get_ptr();
int id = buff_vec.size() - 1;
buff_vec.back().set_status(false);
buff_vec.back().set_id(id);
return id;
}
template int memory_pipe<buf_choose_policy::naive>::get_buffer<MemType::CPU>(const size_t required_size, void ** ptr);
#ifdef INCLUDE_CUDA
template int memory_pipe<buf_choose_policy::naive>::get_buffer<MemType::GPU>(const size_t required_size, void ** ptr);
#endif
-// template< >
-template< MemType mem >
-int memory_pipe<buf_choose_policy::sorted_vec>::get_buffer(const size_t required_size, void ** ptr)
-{
-std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
-const int allocated_buffer_n = buff_vec.size();
-for (int i = 0; i < allocated_buffer_n; i++)
-{
-const bool is_free = buff_vec[i].get_status();
-const int avail_size = buff_vec[i].get_size();
-if(is_free == true && required_size <= avail_size)
-{
-(*ptr) = buff_vec[i].get_ptr();
-buff_vec[i].set_status(false);
-return i;
-}
-}
-typename std::vector<buffer<mem>>::iterator buf_it;
-std::function<bool (const size_t, const buffer<mem>&) > comparator = size_comparator<mem>;
-buf_it = get_upper_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
-buf_it = buff_vec.insert(buf_it, buffer<mem>(required_size));
-(*ptr) = buf_it->get_ptr();
-int id = std::distance(buff_vec.begin(), buf_it);
-buff_vec.back().set_id(id);
-return id;
-}
-template int memory_pipe<buf_choose_policy::sorted_vec>::get_buffer<MemType::CPU>(const size_t required_size, void ** ptr);
-#ifdef INCLUDE_CUDA
-template int memory_pipe<buf_choose_policy::sorted_vec>::get_buffer<MemType::GPU>(const size_t required_size, void ** ptr);
-#endif
+template< MemType mem, buf_choose_policy choose_type >
+memory_pipe<mem, choose_type>& memory_faucet::get_faucet()
+{
+static memory_pipe<mem, choose_type> mem_pipe = memory_pipe<mem, choose_type>();
+return mem_pipe;
+}
// template< >
template< MemType mem >
int memory_pipe<buf_choose_policy::find_best_unsorted>::get_buffer(const size_t required_size, void ** ptr)
{
std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
typename std::vector<buffer<mem>>::iterator available_buf_it;
std::function<bool (const buffer<mem>&, const size_t) > comparator = free_size_comparator<mem>;
available_buf_it = get_lower_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
if(available_buf_it != buff_vec.end())
{
(*ptr) = available_buf_it->get_ptr();
int id = std::distance(buff_vec.begin(), available_buf_it);
buff_vec[id].set_status(false);
return id;
}
else
{
buff_vec.push_back(buffer<mem>(required_size));
(*ptr) = buff_vec.back().get_ptr();
int id = buff_vec.size() - 1;
buff_vec.back().set_id(id);
return id;
}
}
-template int memory_pipe<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::CPU>(const size_t required_size, void ** ptr);
-#ifdef INCLUDE_CUDA
-template int memory_pipe<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::GPU>(const size_t required_size, void ** ptr);
-#endif
+template memory_pipe<MemType::CPU, buf_choose_policy::naive>& memory_faucet::get_faucet();
+template memory_pipe<MemType::CPU, buf_choose_policy::sorted_vec>& memory_faucet::get_faucet();
+template memory_pipe<MemType::CPU, buf_choose_policy::find_best_unsorted>& memory_faucet::get_faucet();
+#ifdef INCLUDE_CUDA
+template memory_pipe<MemType::GPU, buf_choose_policy::naive>& memory_faucet::get_faucet();
+template memory_pipe<MemType::GPU, buf_choose_policy::sorted_vec>& memory_faucet::get_faucet();
+template memory_pipe<MemType::GPU, buf_choose_policy::find_best_unsorted>& memory_faucet::get_faucet();
+#endif
template< >
memory_pipe<buf_choose_policy::naive>& memory_faucet::get_faucet()
{
static memory_pipe<buf_choose_policy::naive> mem_pipe_naive = memory_pipe<buf_choose_policy::naive>();
return mem_pipe_naive;
}
template< >
memory_pipe<buf_choose_policy::sorted_vec>& memory_faucet::get_faucet()
{
static memory_pipe<buf_choose_policy::sorted_vec> mem_pipe_sorted = memory_pipe<buf_choose_policy::sorted_vec>();
return mem_pipe_sorted;
}
template< >
memory_pipe<buf_choose_policy::find_best_unsorted>& memory_faucet::get_faucet()
{
static memory_pipe<buf_choose_policy::find_best_unsorted> mem_pipe_unsorted = memory_pipe<buf_choose_policy::find_best_unsorted>();
return mem_pipe_unsorted;
}
template< MemType mem, buf_choose_policy choose_type >
memBuf<mem, choose_type>::memBuf(const size_t required_size)
{
-memory_pipe<choose_type>& mem_pipe = memory_faucet::get_faucet<choose_type>();
-id = mem_pipe.template get_buffer<mem>(required_size, &buf);
+memory_pipe<mem, choose_type>& mem_pipe = memory_faucet::get_faucet<mem, choose_type>();
+id = mf_utils::get_buffer<choose_type>(required_size, mem_pipe.get_buff_vec(), mem, &buf);
size = required_size;
}
template< MemType mem, buf_choose_policy choose_type >
memBuf<mem, choose_type>::~memBuf()
{
-memory_pipe<choose_type>& mem_pipe = memory_faucet::get_faucet<choose_type>();
-mem_pipe.template set_available<mem>(id);
+memory_pipe<mem, choose_type>& mem_pipe = memory_faucet::get_faucet<mem, choose_type>();
+mem_pipe.set_available(id);
buf = nullptr;
}
@@ -431,7 +81,7 @@ void* memBuf<mem, choose_type>::ptr()
}
template< MemType mem, buf_choose_policy choose_type >
-int memBuf<mem, choose_type>::get_size()
+int memBuf<mem, choose_type>::get_size() const
{
return size;
}
......
#pragma once
#include <cstdlib>
#include <vector>
#include <functional>
#include "mf-utils.h"
#include "TemplateParameters.h"
enum class buf_choose_policy
{
naive,
sorted_vec,
find_best_unsorted
};
template< MemType mem >
class buffer
{
public:
void *ptr;
size_t allocated_size;
size_t scalar_size;
bool is_free;
int id;
buffer(const buffer<mem>& other);
buffer(buffer<mem>&& other);
buffer();
~buffer();
buffer(const size_t required_size);
bool is_available() const;
void* get_ptr();
bool get_status() const;
size_t get_size() const;
int get_id() const;
buffer<mem>& operator=(const buffer<mem>& other);
buffer<mem>& operator=(buffer<mem>&& other);
void reallocate(const size_t required_size);
void set_allocated_size(const size_t required_size);
void set_status(const bool status);
void set_id(const int id);
void set_null_pointer();
};
-class memory_pipe_base
-{
-public:
-#ifdef INCLUDE_CUDA
-std::vector<buffer<MemType::GPU> > gpu_buff;
-#endif
-std::vector<buffer<MemType::CPU> > cpu_buff;
-memory_pipe_base();
-~memory_pipe_base();
-template< MemType mem >
-void set_available(const int id);
-template< MemType mem >
-std::vector<buffer<mem> >& get_memtyped_vector();
-};
-template<buf_choose_policy choose_type = buf_choose_policy::naive>
-class memory_pipe: public memory_pipe_base
-{
-public:
-memory_pipe(/* args */) : memory_pipe_base() {}
-~memory_pipe() = default;
-template< MemType mem >
-int get_buffer(const size_t required_size, void ** ptr);
-};
-template<>
-class memory_pipe<buf_choose_policy::sorted_vec>: public memory_pipe_base
-{
-public:
-memory_pipe(/* args */) : memory_pipe_base() {}
-~memory_pipe() = default;
-template< MemType mem >
-int get_buffer(const size_t required_size, void ** ptr);
-};
-template<>
-class memory_pipe<buf_choose_policy::find_best_unsorted>: public memory_pipe_base
-{
-public:
-memory_pipe(/* args */) : memory_pipe_base() {}
-~memory_pipe() = default;
-template< MemType mem >
-int get_buffer(const size_t required_size, void ** ptr);
-};
+template< MemType mem = MemType::CPU, buf_choose_policy choose_type = buf_choose_policy::naive >
+class memory_pipe
+{
+private:
+std::vector<buffer_base > buff_vec;
+public:
+memory_pipe(/* args */);
+~memory_pipe();
+void set_available(const int id);
+std::vector<buffer_base >& get_buff_vec();
+};
class memory_faucet
@@ -104,11 +22,11 @@ private:
public:
-template<buf_choose_policy choose_type = buf_choose_policy::naive>
-static memory_pipe<choose_type>& get_faucet();
+template< MemType mem = MemType::CPU, buf_choose_policy choose_type = buf_choose_policy::naive >
+static memory_pipe<mem, choose_type>& get_faucet();
};
-template< MemType mem, buf_choose_policy choose_type = buf_choose_policy::naive >
+template< MemType mem = MemType::CPU, buf_choose_policy choose_type = buf_choose_policy::naive >
class memBuf
{
private:
@@ -120,5 +38,5 @@ public:
memBuf(const size_t required_size);
~memBuf();
void* ptr();
-int get_size();
+int get_size() const;
};
\ No newline at end of file
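// Illustrative usage sketch (hypothetical, not from this commit): the RAII pattern
// memBuf is built for, mirroring the test in main(). memory_faucet::get_faucet()
// returns a per-(MemType, policy) singleton memory_pipe, and ~memBuf() hands the
// leased slot back to it via set_available().
#include "memory-faucet.h"

void membuf_example()
{
    memBuf<MemType::CPU> scratch(256 * sizeof(float));   // leases (or allocates) a host buffer
    float* data = static_cast<float*>(scratch.ptr());
    data[0] = 1.0f;
}                                                         // the slot becomes reusable here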
#include "mf-utils.h"
bool mf_utils::free_size_comparator(const buffer_base& obj, const size_t required_size)
{
if(obj.get_status() == false)
return false;
return obj.get_size() <= required_size;
}
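// Predicate used with get_upper_bound below: true when the requested size is
// strictly smaller than the buffer's allocated size.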
bool mf_utils::size_comparator(const size_t required_size, const buffer_base& obj)
{
return required_size < obj.get_size();
}
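// Hand-rolled counterpart of std::lower_bound over a vector of buffer_base with a
// caller-supplied comparator: binary search for the first element where comp(*it, value) is false.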
typename std::vector<buffer_base>::iterator mf_utils::get_lower_bound(
typename std::vector<buffer_base>::iterator first,
typename std::vector<buffer_base>::iterator last, const size_t value,
std::function<bool (const buffer_base&, const size_t) >& comp)
{
typename std::vector<buffer_base>::iterator it;
typename std::iterator_traits<typename std::vector<buffer_base>::iterator>::difference_type count, step;
count = std::distance(first, last);
while (count > 0)
{
it = first;
step = count / 2;
it = std::next(it, step);
if (comp(*it, value))
{
first = ++it;
count -= step + 1;
}
else
count = step;
}
return first;
}
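// Hand-rolled counterpart of std::upper_bound: binary search for the first element
// where comp(value, *it) is true.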
typename std::vector<buffer_base>::iterator mf_utils::get_upper_bound(
typename std::vector<buffer_base>::iterator first,
typename std::vector<buffer_base>::iterator last, const size_t value,
std::function<bool (const size_t, const buffer_base&) >& comp)
{
typename std::vector<buffer_base>::iterator it;
typename std::iterator_traits<typename std::vector<buffer_base>::iterator>::difference_type count, step;
count = std::distance(first, last);
while (count > 0)
{
it = first;
step = count / 2;
it = std::next(it, step);
if (!comp(value, *it))
{
first = ++it;
count -= step + 1;
}
else
count = step;
}
return first;
}
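// naive policy: linear first-fit scan over the pool; if no free buffer is large
// enough, a new buffer_base is appended and its index is returned as the lease id.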
template<>
int mf_utils::get_buffer<buf_choose_policy::naive>(const size_t required_size,
std::vector<buffer_base>& buff_vec,
const MemType mem,
void ** ptr)
{
const int allocated_buffer_n = buff_vec.size();
for (int i = 0; i < allocated_buffer_n; i++)
{
const bool is_free = buff_vec[i].get_status();
const size_t avail_size = buff_vec[i].get_size();
if(is_free == true && required_size <= avail_size)
{
(*ptr) = buff_vec[i].get_ptr();
buff_vec[i].set_status(false);
return i;
}
}
buff_vec.push_back(buffer_base(required_size, mem));
(*ptr) = buff_vec.back().get_ptr();
int id = buff_vec.size() - 1;
buff_vec.back().set_status(false);
buff_vec.back().set_id(id);
return id;
}
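// sorted_vec policy: same first-fit scan, but a newly created buffer is inserted at
// the position chosen by get_upper_bound/size_comparator instead of being appended,
// which keeps the vector ordered by allocation size if it already is.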
template<>
int mf_utils::get_buffer<buf_choose_policy::sorted_vec>(const size_t required_size,
std::vector<buffer_base>& buff_vec,
const MemType mem,
void ** ptr)
{
const int allocated_buffer_n = buff_vec.size();
for (int i = 0; i < allocated_buffer_n; i++)
{
const bool is_free = buff_vec[i].get_status();
const size_t avail_size = buff_vec[i].get_size();
if(is_free == true && required_size <= avail_size)
{
(*ptr) = buff_vec[i].get_ptr();
buff_vec[i].set_status(false);
return i;
}
}
typename std::vector<buffer_base>::iterator buf_it;
std::function<bool (const size_t, const buffer_base&) > comparator = mf_utils::size_comparator;
buf_it = mf_utils::get_upper_bound (buff_vec.begin(), buff_vec.end(), required_size, comparator);
buf_it = buff_vec.insert(buf_it, buffer_base(required_size, mem));
(*ptr) = buf_it->get_ptr();
int id = std::distance(buff_vec.begin(), buf_it);
buff_vec.back().set_id(id);
return id;
}
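// find_best_unsorted policy: picks a candidate via get_lower_bound with
// free_size_comparator; if none is found, a new buffer is appended at the back.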
template<>
int mf_utils::get_buffer<buf_choose_policy::find_best_unsorted>(const size_t required_size,
std::vector<buffer_base>& buff_vec,
const MemType mem,
void ** ptr)
{
typename std::vector<buffer_base>::iterator available_buf_it;
std::function<bool (const buffer_base&, const size_t) > comparator = mf_utils::free_size_comparator;
available_buf_it = mf_utils::get_lower_bound (buff_vec.begin(), buff_vec.end(), required_size, comparator);
if(available_buf_it != buff_vec.end())
{
(*ptr) = available_buf_it->get_ptr();
int id = std::distance(buff_vec.begin(), available_buf_it);
buff_vec[id].set_status(false);
return id;
}
else
{
buff_vec.push_back(buffer_base(required_size, mem));
(*ptr) = buff_vec.back().get_ptr();
int id = buff_vec.size() - 1;
buff_vec.back().set_id(id);
return id;
}
}
\ No newline at end of file
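// Illustrative usage sketch (hypothetical, not from this commit): calling
// mf_utils::get_buffer directly against a caller-owned pool, the same way the new
// memBuf constructor drives it through memory_pipe::get_buff_vec().
#include "mf-utils.h"

void mf_utils_example()
{
    std::vector<buffer_base> pool;
    void* p = nullptr;
    int id = mf_utils::get_buffer<buf_choose_policy::naive>(1024, pool, MemType::CPU, &p);
    // ... use the 1024-byte allocation behind p ...
    pool[id].set_status(true);   // release the slot, as memory_pipe::set_available() does
}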
#pragma once
#include "buffer.h"
#include <functional>
#include <vector>
enum class buf_choose_policy
{
naive,
sorted_vec,
find_best_unsorted
};
namespace mf_utils
{
bool free_size_comparator(const buffer_base& obj, const size_t required_size);
bool size_comparator(const size_t required_size, const buffer_base& obj);
typename std::vector<buffer_base>::iterator get_lower_bound(
typename std::vector<buffer_base>::iterator first,
typename std::vector<buffer_base>::iterator last, const size_t value,
std::function<bool (const buffer_base&, const size_t) >& comp);
typename std::vector<buffer_base>::iterator get_upper_bound(
typename std::vector<buffer_base>::iterator first,
typename std::vector<buffer_base>::iterator last, const size_t value,
std::function<bool (const size_t, const buffer_base&) >& comp);
template<buf_choose_policy choose_type = buf_choose_policy::naive>
int get_buffer(const size_t required_size,
std::vector<buffer_base>& buff_vec,
const MemType mem,
void ** ptr);
}
\ No newline at end of file
@@ -25,8 +25,8 @@ int main(void)
printf("Start Buf6\n");
memBuf<MemType::CPU> Buf6(required_size6);
printf("Start Buf7\n");
memBuf<MemType::GPU> Buf7(required_size6);
// printf("Start Buf7\n");
// memBuf<MemType::GPU> Buf7(required_size6);
float* ptr1 = static_cast<float*>(Buf1.ptr());
float* ptr2 = static_cast<float*>(Buf2.ptr());
......