Skip to content
Snippets Groups Projects
Select Git revision
  • 304cd577afd0f7ac74baaa6408bd7693f9cb1607
  • main default protected
2 results

memory-faucet.cpp

Blame
  • memory-faucet.cpp 12.74 KiB
    #include "memory-faucet.h"
    #include "MemoryProcessing.h"
    #ifdef INCLUDE_CUDA
    #include "MemoryProcessing.cuh"
    #endif

    #include <algorithm>  // std::lower_bound, std::upper_bound, std::find_if
    
    /// Default-construct an empty, unallocated buffer slot: owns no memory,
    /// is marked free, and carries no pool id yet.
    template< MemType mem >
    buffer<mem>::buffer()
    {
        id = -1;
        is_free = true;
        ptr = nullptr;
        scalar_size = 0;
        allocated_size = 0;
    }
    
    /// Release the owned allocation through the mem-typed deallocator.
    /// NOTE(review): assumes memproc::dealloc tolerates ptr == nullptr with
    /// allocated_size == 0 (the default-constructed state) — confirm.
    template< MemType mem >
    buffer<mem>::~buffer()
    {
        memproc::dealloc<mem>((void *&)ptr, allocated_size);
    }
    
    /// True while the buffer sits unused in the pool (same flag as get_status()).
    template< MemType mem >
    bool buffer<mem>::is_available() const
    {
        return is_free;
    }
    
    /// Resize the owned allocation to required_size bytes.
    /// memproc::realloc updates both ptr and allocated_size in place.
    template< MemType mem >
    void buffer<mem>::reallocate(const size_t required_size)
    {
        memproc::realloc<mem>((void *&)ptr, allocated_size, required_size);
    }
    
    /// Construct a buffer that immediately owns an allocation of required_size
    /// bytes. The buffer starts in the "in use" state (is_free == false).
    template< MemType mem >
    buffer<mem>::buffer(const size_t required_size)
    {
        ptr = nullptr;
        allocated_size = 0;
        reallocate(required_size);
        scalar_size = 0;
        is_free = false;
        id = -1;  // fix: id was left uninitialized; get_id() on a fresh buffer read garbage
    }
    
    /// lower_bound-style predicate: true while `obj` cannot satisfy a request of
    /// required_size bytes (it is busy, or strictly smaller than required).
    /// Fixes: the original returned false for busy buffers — letting the binary
    /// search stop on (and hand out) a buffer that is already in use — and used
    /// `<=`, which skipped free buffers whose size exactly equals required_size.
    template< MemType mem >
    bool free_size_comparator(const buffer<mem>& obj, const size_t required_size)
    {
        if(obj.get_status() == false)
            return true;  // busy buffers can never satisfy the request — keep searching

        return obj.get_size() < required_size;
    }

    template bool free_size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
    #ifdef INCLUDE_CUDA
    template bool free_size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
    #endif
    
    /// upper_bound-style ordering predicate for the size-sorted pool:
    /// true when required_size sorts strictly before obj (ascending by size).
    template< MemType mem >
    bool size_comparator(const size_t required_size, const buffer<mem>& obj)
    {
        return required_size < obj.get_size();
    }

    template bool size_comparator(const size_t required_size, const buffer<MemType::CPU>& obj);
    #ifdef INCLUDE_CUDA
    template bool size_comparator(const size_t required_size, const buffer<MemType::GPU>& obj);
    #endif
    
    /// Raw pointer to the owned allocation (nullptr when unallocated).
    template< MemType mem >
    void* buffer<mem>::get_ptr()
    {
        return ptr;
    }
    
    /// True while the buffer is free for reuse (alias of is_available()).
    template< MemType mem >
    bool buffer<mem>::get_status() const
    {
        return is_free;
    }
    
    /// Size in bytes of the owned allocation.
    template< MemType mem >
    size_t buffer<mem>::get_size() const
    {
        return allocated_size;
    }
    
    /// Index of this buffer inside its pool vector (-1 when not yet pooled).
    template< MemType mem >
    int buffer<mem>::get_id() const
    {
        return id;
    }
    
    /// Overwrite the recorded allocation size WITHOUT touching the allocation
    /// itself — caller is responsible for keeping ptr consistent.
    template< MemType mem >
    void buffer<mem>::set_allocated_size(const size_t required_size) 
    {
        allocated_size = required_size;
    }
    
    /// Drop the pointer WITHOUT deallocating — used to disown the allocation
    /// (e.g. after transferring it elsewhere). Pair with set_allocated_size(0).
    template< MemType mem >
    void buffer<mem>::set_null_pointer()
    {
        ptr = nullptr;
    }
    
    /// Copy-construct: allocate a fresh block the same size as `other` and copy
    /// the bookkeeping fields. NOTE(review): the payload bytes are NOT copied —
    /// only the allocation is duplicated; confirm callers expect this.
    template< MemType mem >
    buffer<mem>::buffer(const buffer<mem>& other)
    {    
        ptr = nullptr;  // fix: reallocate() previously read an uninitialized pointer (UB)
        allocated_size = 0;
        reallocate(other.get_size());
        scalar_size = other.scalar_size;  // fix: scalar_size was left uninitialized
        is_free = other.get_status();
        id = other.get_id();
    }
    
    /// Move-construct: steal the allocation and bookkeeping from `other`,
    /// leaving it empty (null pointer, zero sizes, id -1, marked not free).
    /// NOTE(review): not declared noexcept (signature fixed by the header), so
    /// std::vector growth falls back to the COPY constructor via
    /// move_if_noexcept — which re-allocates and frees the old block, dangling
    /// any pointers already handed out by get_buffer. Confirm and fix in header.
    template< MemType mem >
    buffer<mem>::buffer(buffer<mem>&& other) 
    {    
        ptr = other.ptr;
        allocated_size = other.allocated_size;
        scalar_size = other.scalar_size;
        is_free = other.is_free;
        id = other.id;

        // Reset the source so its destructor does not free the stolen block.
        other.ptr = nullptr;
        other.allocated_size = 0;
        other.scalar_size = 0;
        other.is_free = false;
        other.id = -1;
    }
    
    /// Copy-assign: resize our allocation to match `other` and copy the
    /// bookkeeping fields. Payload bytes are NOT copied (matches the copy ctor).
    template< MemType mem >
    buffer<mem>& buffer<mem>::operator=(const buffer<mem>& other)
    {
        if (this == &other)
            return *this;
     
        reallocate(other.get_size());
        scalar_size = other.scalar_size;  // fix: scalar_size was not copied (move-assign copies it)
        is_free = other.get_status();
        id = other.get_id();
    
        return *this;
    }
    
    /// Move-assign: release the current allocation first, then steal everything
    /// from `other`, leaving it empty (null pointer, zero sizes, id -1).
    template< MemType mem >
    buffer<mem>& buffer<mem>::operator=(buffer<mem>&& other) 
    {
        if (this != &other)
        {
            // Free our own block before overwriting ptr, or it would leak.
            memproc::dealloc<mem>((void *&)ptr, allocated_size);
            ptr = other.ptr;
            allocated_size = other.allocated_size;
            scalar_size = other.scalar_size;
            is_free = other.is_free;
            id = other.id;
    
            // Reset the source so its destructor does not free the stolen block.
            other.ptr = nullptr;
            other.allocated_size = 0;
            other.scalar_size = 0;
            other.is_free = false;
            other.id = -1;
        }
    
        return *this;
    }
    
    /// Mark the buffer free (true) or in use (false).
    template< MemType mem >
    void buffer<mem>::set_status(const bool status)
    {
        is_free = status;
    }
    
    /// Record the buffer's index within its pool vector.
    template< MemType mem >
    void buffer<mem>::set_id(const int idx)
    {
        id = idx;
    }
    
    template class buffer<MemType::CPU>;
    #ifdef INCLUDE_CUDA
    template class buffer<MemType::GPU>;
    #endif
    
    /// The pool vectors are default-constructed members; re-assigning freshly
    /// constructed empty vectors to them (as the original did) was a no-op,
    /// so the body is intentionally empty.
    memory_pipe_base::memory_pipe_base()
    {
    }
    
    /// The pool vectors destroy themselves (each buffer's destructor releases
    /// its allocation); the explicit clear() calls were redundant.
    memory_pipe_base::~memory_pipe_base()
    {
    }
    
    /// Select the CPU pool for MemType::CPU requests.
    template< >
    std::vector<buffer<MemType::CPU> >& memory_pipe_base::get_memtyped_vector()
    {
        return cpu_buff;
    }
    
    #ifdef INCLUDE_CUDA
    /// Select the GPU pool for MemType::GPU requests (CUDA builds only).
    template<>
    std::vector<buffer<MemType::GPU> >& memory_pipe_base::get_memtyped_vector()
    {
        return gpu_buff;
    }
    #endif
    
    template< MemType mem >
    void memory_pipe_base::set_available(const int id)
    {
        get_memtyped_vector<mem>()[id].set_status(true);
    }
    
    template void memory_pipe_base::set_available<MemType::CPU>(const int id);
    #ifdef INCLUDE_CUDA
    template void memory_pipe_base::set_available<MemType::GPU>(const int id);
    #endif
    
    /// First position in [first, last) for which comp(elem, value) is false.
    /// The original hand-rolled the textbook binary search verbatim; delegate to
    /// std::lower_bound instead. As with std::lower_bound, the range must be
    /// partitioned by comp for the result to be meaningful.
    template< MemType mem >
    typename std::vector<buffer<mem>>::iterator get_lower_bound(typename std::vector<buffer<mem>>::iterator first, 
        typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const buffer<mem>&, const size_t) >& comp)
    {
        return std::lower_bound(first, last, value, comp);
    }
    
    template typename std::vector<buffer<MemType::CPU>>::iterator get_lower_bound<MemType::CPU >(typename std::vector<buffer<MemType::CPU>>::iterator first, 
        typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::CPU>&, const size_t)>& );
    #ifdef INCLUDE_CUDA
    template typename std::vector<buffer<MemType::GPU>>::iterator get_lower_bound<MemType::GPU >(typename std::vector<buffer<MemType::GPU>>::iterator first, 
        typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::GPU>&, const size_t)>& );
    #endif
    
    /// First position in [first, last) for which comp(value, elem) is true.
    /// The original hand-rolled the textbook binary search verbatim; delegate to
    /// std::upper_bound instead. The range must be partitioned by comp.
    template< MemType mem >
    typename std::vector<buffer<mem>>::iterator get_upper_bound(typename std::vector<buffer<mem>>::iterator first, 
        typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<mem>&) >& comp)
    {
        return std::upper_bound(first, last, value, comp);
    }
    
    template typename std::vector<buffer<MemType::CPU>>::iterator get_upper_bound<MemType::CPU >(typename std::vector<buffer<MemType::CPU>>::iterator first, 
        typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::CPU>&)>& );
    #ifdef INCLUDE_CUDA
    template typename std::vector<buffer<MemType::GPU>>::iterator get_upper_bound<MemType::GPU >(typename std::vector<buffer<MemType::GPU>>::iterator first, 
        typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::GPU>&)>& );
    #endif
    
    /// Naive first-fit lookup: linearly scan the mem-typed pool for a free
    /// buffer of at least required_size bytes; if none fits, append a new one.
    /// Stores the buffer's data pointer in *ptr and returns its pool index.
    template<buf_choose_policy choose_type>
    template< MemType mem >
    int memory_pipe<choose_type>::get_buffer(const size_t required_size, void ** ptr)
    {
        std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
        const size_t allocated_buffer_n = buff_vec.size();
        for (size_t i = 0; i < allocated_buffer_n; i++)
        {
            // fix: the size was previously narrowed into an int before a
            // signed/unsigned comparison, which broke for sizes >= 2^31
            if (buff_vec[i].get_status() && required_size <= buff_vec[i].get_size())
            {
                (*ptr) = buff_vec[i].get_ptr();
                buff_vec[i].set_status(false);
                return static_cast<int>(i);
            }
        }
    
        // No free buffer fits: append one (the sized ctor already marks it busy).
        buff_vec.push_back(buffer<mem>(required_size));
    
        const int id = static_cast<int>(buff_vec.size()) - 1;
        (*ptr) = buff_vec.back().get_ptr();
        buff_vec.back().set_id(id);
    
        return id;
    }
    
    template int memory_pipe<buf_choose_policy::naive>::get_buffer<MemType::CPU>(const size_t required_size, void ** ptr);
    #ifdef INCLUDE_CUDA
    template int memory_pipe<buf_choose_policy::naive>::get_buffer<MemType::GPU>(const size_t required_size, void ** ptr);
    #endif
    
    /// Size-sorted pool lookup: first-fit scan for a free buffer, then — if none
    /// fits — insert a new buffer at its size-ordered position.
    /// NOTE(review): a mid-vector insert shifts the indices of every later
    /// buffer while previously handed-out ids are not updated, so a later
    /// set_available() on an old id can free the wrong buffer. Design flaw.
    template< MemType mem >
    int memory_pipe<buf_choose_policy::sorted_vec>::get_buffer(const size_t required_size, void ** ptr)
    {
        std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
        const size_t allocated_buffer_n = buff_vec.size();
        for (size_t i = 0; i < allocated_buffer_n; i++)
        {
            // fix: the size was previously narrowed into an int before comparing
            if (buff_vec[i].get_status() && required_size <= buff_vec[i].get_size())
            {
                (*ptr) = buff_vec[i].get_ptr();
                buff_vec[i].set_status(false);
                return static_cast<int>(i);
            }
        }
    
        // Insert a new (busy) buffer at the position that keeps sizes ascending.
        std::function<bool (const size_t, const buffer<mem>&) > comparator = size_comparator<mem>;
        typename std::vector<buffer<mem>>::iterator buf_it =
            get_upper_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
        buf_it = buff_vec.insert(buf_it, buffer<mem>(required_size));
        (*ptr) = buf_it->get_ptr();
        const int id = static_cast<int>(std::distance(buff_vec.begin(), buf_it));
        buf_it->set_id(id);  // fix: was buff_vec.back().set_id(id) — back() is not the inserted element
        return id;
    }
    
    template int memory_pipe<buf_choose_policy::sorted_vec>::get_buffer<MemType::CPU>(const size_t required_size, void ** ptr);
    #ifdef INCLUDE_CUDA
    template int memory_pipe<buf_choose_policy::sorted_vec>::get_buffer<MemType::GPU>(const size_t required_size, void ** ptr);
    #endif
    
    /// Lookup over an UNSORTED pool. Fix: the original ran a binary search
    /// (get_lower_bound) over the unsorted vector — on such data it can land on
    /// a busy buffer (handing the same memory out twice) or miss free buffers
    /// entirely. An unsorted range requires a linear scan.
    template< MemType mem >
    int memory_pipe<buf_choose_policy::find_best_unsorted>::get_buffer(const size_t required_size, void ** ptr)
    {
        std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
        // Linear first-fit scan: free AND at least required_size bytes.
        typename std::vector<buffer<mem>>::iterator available_buf_it =
            std::find_if(buff_vec.begin(), buff_vec.end(),
                [required_size](const buffer<mem>& b)
                { return b.get_status() && required_size <= b.get_size(); });
        if(available_buf_it != buff_vec.end())
        {    
            (*ptr) = available_buf_it->get_ptr();
            const int id = static_cast<int>(std::distance(buff_vec.begin(), available_buf_it));
            available_buf_it->set_status(false);
            return id;
        }
        else
        {
            // No candidate: append a new buffer (constructed busy) at the end.
            buff_vec.push_back(buffer<mem>(required_size));
            (*ptr) = buff_vec.back().get_ptr();
            const int id = static_cast<int>(buff_vec.size()) - 1;
            buff_vec.back().set_id(id);
            return id;
        }
    }
    
    template int memory_pipe<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::CPU>(const size_t required_size, void ** ptr);
    #ifdef INCLUDE_CUDA
    template int memory_pipe<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::GPU>(const size_t required_size, void ** ptr);
    #endif
    
    /// Process-wide singleton pipe for the naive policy (function-local static:
    /// constructed on first use, thread-safe initialization).
    template< >
    memory_pipe<buf_choose_policy::naive>& memory_faucet::get_faucet()
    {
        static memory_pipe<buf_choose_policy::naive> mem_pipe_naive;
        return mem_pipe_naive;
    }
    
    /// Process-wide singleton pipe for the sorted_vec policy (function-local
    /// static: constructed on first use, thread-safe initialization).
    template< >
    memory_pipe<buf_choose_policy::sorted_vec>& memory_faucet::get_faucet()
    {
        static memory_pipe<buf_choose_policy::sorted_vec> mem_pipe_sorted;
        return mem_pipe_sorted;
    }
    
    /// Process-wide singleton pipe for the find_best_unsorted policy
    /// (function-local static: constructed on first use, thread-safe init).
    template< >
    memory_pipe<buf_choose_policy::find_best_unsorted>& memory_faucet::get_faucet()
    {
        static memory_pipe<buf_choose_policy::find_best_unsorted> mem_pipe_unsorted;
        return mem_pipe_unsorted;
    }
    
    
    /// RAII handle: borrow (or create) a pooled buffer of at least
    /// required_size bytes from the policy's singleton pipe.
    template< MemType mem, buf_choose_policy choose_type >
    memBuf<mem, choose_type>::memBuf(const size_t required_size)
    {
        memory_pipe<choose_type>& mem_pipe = memory_faucet::get_faucet<choose_type>();
        id = mem_pipe.template get_buffer<mem>(required_size, &buf);
        size = required_size;  // NOTE(review): get_size() returns int — size_t value may narrow; confirm member type in header
    }
    
    /// Return the borrowed buffer to its pool. The pool keeps the allocation
    /// alive for reuse, so only the non-owning local pointer is reset here.
    template< MemType mem, buf_choose_policy choose_type >
    memBuf<mem, choose_type>::~memBuf()
    {
        buf = nullptr;
        memory_faucet::get_faucet<choose_type>().template set_available<mem>(id);
    }
    
    /// Non-owning pointer to the borrowed pool memory (nullptr after destruction).
    template< MemType mem, buf_choose_policy choose_type >
    void* memBuf<mem, choose_type>::ptr()
    {
        return buf;
    }
    
    /// Requested size of the borrowed buffer, as recorded by the constructor.
    template< MemType mem, buf_choose_policy choose_type >
    int memBuf<mem, choose_type>::get_size()
    {
        return size;
    }
    
    template class memBuf<MemType::CPU, buf_choose_policy::naive>;
    template class memBuf<MemType::CPU, buf_choose_policy::sorted_vec>;
    template class memBuf<MemType::CPU, buf_choose_policy::find_best_unsorted>;
    
    #ifdef INCLUDE_CUDA
    template class memBuf<MemType::GPU, buf_choose_policy::naive>;
    template class memBuf<MemType::GPU, buf_choose_policy::sorted_vec>;
    template class memBuf<MemType::GPU, buf_choose_policy::find_best_unsorted>;
    #endif