#include "memory-holder.h" #include "MemoryProcessing.h" #ifdef INCLUDE_CUDA #include "MemoryProcessing.cuh" #endif #include <algorithm> template< MemType mem > buffer<mem>::buffer() { ptr = nullptr; allocated_size = 0; scalar_size = 0; is_free = true; id = -1; } template< MemType mem > buffer<mem>::~buffer() { memproc::dealloc<mem>((void *&)ptr, allocated_size); } template< MemType mem > bool buffer<mem>::is_available() const { return is_free; } template< MemType mem > void buffer<mem>::reallocate(const size_t required_size) { memproc::realloc<mem>((void *&)ptr, allocated_size, required_size); } template< MemType mem > buffer<mem>::buffer(const size_t required_size) { reallocate(required_size); scalar_size = 0; is_free = false; } template< MemType mem > bool size_comparator(const buffer<mem>& obj, const size_t required_size) { if(obj.get_status() == false) return false; return obj.get_size() <= required_size; } template bool size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size); #ifdef INCLUDE_CUDA template bool size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size); #endif template< MemType mem > void* buffer<mem>::get_ptr() const { return ptr; } template< MemType mem > bool buffer<mem>::get_status() const { return is_free; } template< MemType mem > size_t buffer<mem>::get_size() const { return allocated_size; } template< MemType mem > buffer<mem>& buffer<mem>::operator=(buffer<mem>& other) { if (this == &other) return *this; // memproc::realloc<mem>((void*&)ptr, allocated_size, other.get_size()); std::swap(ptr, other.ptr); std::swap(allocated_size, other.allocated_size); // exchange resources between *this and other std::swap(scalar_size, other.scalar_size); std::swap(is_free, other.is_free); std::swap(id, other.id); return *this; } template< MemType mem > void buffer<mem>::set_status(const bool status) { is_free = status; } template< MemType mem > void buffer<mem>::set_id(const int idx) { id = idx; } template class buffer<MemType::CPU>; #ifdef INCLUDE_CUDA template class buffer<MemType::GPU>; #endif memory_pipline::memory_pipline() { #ifdef INCLUDE_CUDA gpu_buff = std::vector<buffer<MemType::GPU> > (); #endif cpu_buff = std::vector<buffer<MemType::CPU> > (); } memory_pipline::~memory_pipline() { #ifdef INCLUDE_CUDA gpu_buff.clear(); #endif cpu_buff.clear(); } template< MemType mem > std::vector<buffer<mem> >& memory_pipline::get_memtyped_vector() { return cpu_buff; } #ifdef INCLUDE_CUDA template<> std::vector<buffer<MemType::GPU> >& memory_pipline::get_memtyped_vector() { return gpu_buff; } #endif template< MemType mem > int memory_pipline::get_buffer(const size_t required_size, void * ptr) { typename std::vector<buffer<mem>>::iterator available_buf_it; available_buf_it = std::lower_bound (get_memtyped_vector<mem>().begin(), get_memtyped_vector<mem>().end(), required_size, size_comparator<mem>); if(available_buf_it != get_memtyped_vector<mem>().end()) { ptr = available_buf_it->get_ptr(); int id = std::distance(get_memtyped_vector<mem>().begin(), available_buf_it); return id; } else { get_memtyped_vector<mem>().push_back(buffer<mem>(required_size)); ptr = get_memtyped_vector<mem>().back().get_ptr(); int id = get_memtyped_vector<mem>().size() - 1; return id; } } template int memory_pipline::get_buffer<MemType::CPU>(const size_t required_size, void * ptr); #ifdef INCLUDE_CUDA template int memory_pipline::get_buffer<MemType::GPU>(const size_t required_size, void * ptr); #endif template< MemType mem > void memory_pipline::set_available(const int 
template< MemType mem >
void memory_pipline::set_available(const int id)
{
    get_memtyped_vector<mem>()[id].set_status(true);
}

template void memory_pipline::set_available<MemType::CPU>(const int id);
#ifdef INCLUDE_CUDA
template void memory_pipline::set_available<MemType::GPU>(const int id);
#endif

memory_faucet::memory_faucet() {}

memory_pipline* memory_faucet::get_faucet()
{
    // Lazily constructed process-wide pool (note: not thread-safe).
    if (mem_pipe == nullptr)
    {
        mem_pipe = new memory_pipline();
    }
    return mem_pipe;
}

memory_pipline* memory_faucet::mem_pipe = nullptr;

template< MemType mem >
memBuf<mem>::memBuf(const size_t required_size)
{
    memory_pipline* mem_pipe = memory_faucet::get_faucet();
    id = mem_pipe->get_buffer<mem>(required_size, buf);
    size = required_size;
}

template< MemType mem >
memBuf<mem>::~memBuf()
{
    // Return the slot to the pool so later memBuf instances can reuse it.
    // (This body was previously commented out, which made every request
    // allocate a new buffer and grow the pool without bound.)
    memory_pipline* mem_pipe = memory_faucet::get_faucet();
    mem_pipe->set_available<mem>(id);
}

template< MemType mem >
void* memBuf<mem>::ptr()
{
    return buf;
}

template< MemType mem >
int memBuf<mem>::get_size()
{
    return size;
}

template class memBuf<MemType::CPU>;
#ifdef INCLUDE_CUDA
template class memBuf<MemType::GPU>;
#endif
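
// ----------------------------------------------------------------------------
// Sketch (not part of the original file): std::vector may relocate its
// elements when it grows. With the implicitly generated copy constructor,
// relocation copies the raw `ptr` and the old element's destructor then
// deallocates it, leaving the pool with dangling pointers. One way to make
// growth safe is to give buffer move semantics; the definition below is a
// hedged illustration and assumes a matching declaration is added to
// memory-holder.h. It is deliberately excluded from compilation.
#if 0
template< MemType mem >
buffer<mem>::buffer(buffer<mem>&& other) noexcept
{
    ptr = other.ptr;
    allocated_size = other.allocated_size;
    scalar_size = other.scalar_size;
    is_free = other.is_free;
    id = other.id;
    other.ptr = nullptr;          // the moved-from object must not deallocate
    other.allocated_size = 0;
}
#endif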
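
// ----------------------------------------------------------------------------
// Usage sketch (not part of the original file): a minimal illustration of the
// intended RAII flow, assuming the void *& signature of get_buffer and the
// re-enabled memBuf destructor above. The macro guard and main() are
// hypothetical and exist only so the example does not affect normal builds.
#ifdef MEMORY_HOLDER_USAGE_EXAMPLE
#include <cstdio>

int main()
{
    {
        // No free buffer exists yet, so the pool allocates a fresh one.
        memBuf<MemType::CPU> a(1024);
        std::printf("a: ptr=%p size=%d\n", a.ptr(), a.get_size());
    } // a's destructor marks the pooled buffer free again

    // Fits inside the 1024-byte buffer released above, so it is reused.
    memBuf<MemType::CPU> b(512);
    std::printf("b: ptr=%p size=%d\n", b.ptr(), b.get_size());
    return 0;
}
#endif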