#include "memory-holder.h"
#include "MemoryProcessing.h"
#ifdef INCLUDE_CUDA
#include "MemoryProcessing.cuh"
#endif

#include <algorithm>
#include <cstdio>
#include <functional>
#include <iterator>
#include <vector>

template< MemType mem >
buffer<mem>::buffer()
{
    ptr = nullptr;
    allocated_size = 0;
    scalar_size = 0;
    is_free = true;
    id = -1;
}

template< MemType mem >
buffer<mem>::~buffer()
{
    memproc::dealloc<mem>((void *&)ptr, allocated_size);
}

template< MemType mem >
bool buffer<mem>::is_available() const
{
    return is_free;
}

template< MemType mem >
void buffer<mem>::reallocate(const size_t required_size)
{
    memproc::realloc<mem>((void *&)ptr, allocated_size, required_size);
}

template< MemType mem >
buffer<mem>::buffer(const size_t required_size)
{
    ptr = nullptr;          // must be initialized before reallocate reads it
    allocated_size = 0;
    reallocate(required_size);
    scalar_size = 0;
    is_free = false;
    id = -1;
}


// Predicate for get_lower_bound: true while a buffer must be skipped,
// i.e. while it is occupied or too small.
template< MemType mem >
bool free_size_comparator(const buffer<mem>& obj, const size_t required_size)
{
    if(obj.get_status() == false)
        return true; // occupied buffers are never candidates

    return obj.get_size() < required_size;
}

template bool free_size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
#ifdef INCLUDE_CUDA
template bool free_size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
#endif
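
// Intended contract (sketch; `n` stands for required_size):
//   free_size_comparator(occupied buffer, n)      -> true  (skip)
//   free_size_comparator(free buffer, size < n)   -> true  (skip)
//   free_size_comparator(free buffer, size >= n)  -> false (candidate)
// get_lower_bound below returns the first element for which the
// comparator reports false.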

// Predicate for get_upper_bound: true when required_size sorts strictly
// before obj; it keeps the sorted_vec pool ordered by allocated size.
template< MemType mem >
bool size_comparator(const size_t required_size, const buffer<mem>& obj)
{
    return required_size < obj.get_size();
}

template bool size_comparator(const size_t required_size, const buffer<MemType::CPU>& obj);
#ifdef INCLUDE_CUDA
template bool size_comparator(const size_t required_size, const buffer<MemType::GPU>& obj);
#endif

template< MemType mem >
void* buffer<mem>::get_ptr()
{
    return ptr;
}

template< MemType mem >
bool buffer<mem>::get_status() const
{
    return is_free;
}

template< MemType mem >
size_t buffer<mem>::get_size() const
{
    return allocated_size;
}

template< MemType mem >
int buffer<mem>::get_id() const
{
    return id;
}

template< MemType mem >
void buffer<mem>::set_allocated_size(const size_t required_size) 
{
    allocated_size = required_size;
}

template< MemType mem >
buffer<mem>::buffer(const buffer<mem>& other)
{
    ptr = nullptr;          // must be initialized before reallocate reads it
    allocated_size = 0;
    reallocate(other.get_size());
    scalar_size = 0;
    is_free = other.get_status();
    id = other.get_id();
    // NOTE: only the allocation is reproduced; the contents are not copied.
}

template< MemType mem >
buffer<mem>& buffer<mem>::operator=(buffer<mem>& other)
{
    if (this == &other)
        return *this;

    // Swap ownership of the storage: `other` keeps this buffer's old
    // allocation (and its size) and releases it in its own destructor.
    std::swap(ptr, other.ptr);
    std::swap(allocated_size, other.allocated_size);
    is_free = other.get_status();
    id = other.get_id();
    return *this;
}

template< MemType mem >
buffer<mem>& buffer<mem>::operator=(const buffer<mem>& other)
{
    if (this == &other)
        return *this;

    // NOTE: only the allocation is reproduced; the contents are not copied.
    reallocate(other.get_size());
    is_free = other.get_status();
    id = other.get_id();

    return *this;
}

template< MemType mem >
void buffer<mem>::set_status(const bool status)
{
    is_free = status;
}

template< MemType mem >
void buffer<mem>::set_id(const int idx)
{
    id = idx;
}

template class buffer<MemType::CPU>;
#ifdef INCLUDE_CUDA
template class buffer<MemType::GPU>;
#endif
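
// Semantics sketch (CPU instantiation, sizes illustrative):
//   buffer<MemType::CPU> a(1024), b(2048);
//   a = b;                     // non-const overload: a and b swap storage
//   buffer<MemType::CPU> c(a); // copy: fresh allocation of a's size;
//                              // the contents are not duplicated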

memory_pipline_base::memory_pipline_base()
{
    // cpu_buff (and gpu_buff when CUDA is enabled) are default-constructed
    // empty vectors, so there is nothing to initialize here.
}

memory_pipline_base::~memory_pipline_base()
{
#ifdef INCLUDE_CUDA
    gpu_buff.clear();
#endif
    cpu_buff.clear();
}

template< MemType mem >
std::vector<buffer<mem> >& memory_pipline_base::get_memtyped_vector()
{
    // The primary template serves MemType::CPU; the GPU vector is returned
    // by the explicit specialization below.
    return cpu_buff;
}

#ifdef INCLUDE_CUDA
template<>
std::vector<buffer<MemType::GPU> >& memory_pipline_base::get_memtyped_vector()
{
    return gpu_buff;
}
#endif

template< MemType mem >
void memory_pipline_base::set_available(const int id)
{
    get_memtyped_vector<mem>()[id].set_status(true);
}

template void memory_pipline_base::set_available<MemType::CPU>(const int id);
#ifdef INCLUDE_CUDA
template void memory_pipline_base::set_available<MemType::GPU>(const int id);
#endif

template< MemType mem >
typename std::vector<buffer<mem>>::iterator get_lower_bound(typename std::vector<buffer<mem>>::iterator first, 
    typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const buffer<mem>&, const size_t) >& comp)
{
    typename std::vector<buffer<mem>>::iterator it;
    typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
    count = std::distance(first, last);
 
    while (count > 0)
    {
        it = first;
        step = count / 2;
        std::advance(it, step); // std::next returns a copy; advance mutates `it`
 
        if (comp(*it, value))
        {
            first = ++it;
            count -= step + 1;
        }
        else
            count = step;
    }
 
    return first;
}
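
// For reference: the loop above mirrors std::lower_bound; an equivalent
// call (sketch) would be
//   auto it = std::lower_bound(first, last, value, comp);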

template typename std::vector<buffer<MemType::CPU>>::iterator get_lower_bound<MemType::CPU>(typename std::vector<buffer<MemType::CPU>>::iterator first, 
    typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::CPU>&, const size_t)>& );
#ifdef INCLUDE_CUDA
template typename std::vector<buffer<MemType::GPU>>::iterator get_lower_bound<MemType::GPU>(typename std::vector<buffer<MemType::GPU>>::iterator first, 
    typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::GPU>&, const size_t)>& );
#endif

template< MemType mem >
typename std::vector<buffer<mem>>::iterator get_upper_bound(typename std::vector<buffer<mem>>::iterator first, 
    typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<mem>&) >& comp)
{
    typename std::vector<buffer<mem>>::iterator it;
    typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
    count = std::distance(first, last);
 
    while (count > 0)
    {
        it = first; 
        step = count / 2;
        std::advance(it, step); // std::next returns a copy; advance mutates `it`
 
        if (!comp(value, *it))
        {
            first = ++it;
            count -= step + 1;
        } 
        else
            count = step;
    }
 
    return first;
}

template typename std::vector<buffer<MemType::CPU>>::iterator get_upper_bound<MemType::CPU>(typename std::vector<buffer<MemType::CPU>>::iterator first, 
    typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::CPU>&)>& );
#ifdef INCLUDE_CUDA
template typename std::vector<buffer<MemType::GPU>>::iterator get_upper_bound<MemType::GPU>(typename std::vector<buffer<MemType::GPU>>::iterator first, 
    typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::GPU>&)>& );
#endif

template<buf_choose_policy choose_type>
template< MemType mem >
int memory_pipline<choose_type>::get_buffer(const size_t required_size, void *& ptr)
{
    // Take the pool by reference: copying it would hand out a pointer into
    // a temporary and lose any newly created buffer.
    std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
    const int allocated_buffer_n = buff_vec.size();
    for (int i = 0; i < allocated_buffer_n; i++)
    {
        const bool is_free = buff_vec[i].get_status();
        const size_t avail_size = buff_vec[i].get_size();

        if(is_free && required_size <= avail_size)
        {
            buff_vec[i].set_status(false); // mark as taken before handing it out
            ptr = buff_vec[i].get_ptr();
            return i;
        }
    }

    buff_vec.push_back(buffer<mem>(required_size));
    ptr = buff_vec.back().get_ptr();
    int id = buff_vec.size() - 1;
    return id;
}

template int memory_pipline<buf_choose_policy::naiv>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
#ifdef INCLUDE_CUDA
template int memory_pipline<buf_choose_policy::naiv>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
#endif

template< MemType mem >
int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer(const size_t required_size, void *& ptr)
{
    std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
    const int allocated_buffer_n = buff_vec.size();
    for (int i = 0; i < allocated_buffer_n; i++)
    {
        const bool is_free = buff_vec[i].get_status();
        const size_t avail_size = buff_vec[i].get_size();

        if(is_free && required_size <= avail_size)
        {
            buff_vec[i].set_status(false); // mark as taken before handing it out
            ptr = buff_vec[i].get_ptr();
            return i;
        }
    }

    // Keep the pool ordered by size. Inserting in the middle shifts the
    // indices of every later buffer, so ids handed out earlier stay valid
    // only while no insertion happens in front of them.
    typename std::vector<buffer<mem>>::iterator buf_it;
    std::function<bool (const size_t, const buffer<mem>&) > comparator = size_comparator<mem>;
    buf_it = get_upper_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
    buf_it = buff_vec.insert(buf_it, buffer<mem>(required_size));
    ptr = buf_it->get_ptr();
    int id = std::distance(buff_vec.begin(), buf_it);

    return id;
}

template int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
#ifdef INCLUDE_CUDA
template int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
#endif

template< MemType mem >
int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer(const size_t required_size, void *& ptr)
{
    std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
    typename std::vector<buffer<mem>>::iterator available_buf_it;
    std::function<bool (const buffer<mem>&, const size_t) > comparator = free_size_comparator<mem>;
    // NOTE: get_lower_bound is a binary search, so it is only exact when the
    // predicate partitions the pool; on a genuinely unsorted pool it acts as
    // a heuristic and may miss a usable buffer.
    available_buf_it = get_lower_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
    if(available_buf_it != buff_vec.end())
    {
        available_buf_it->set_status(false); // mark as taken before handing it out
        ptr = available_buf_it->get_ptr();
        int id = std::distance(buff_vec.begin(), available_buf_it);
        return id;
    }
    else
    {
        buff_vec.push_back(buffer<mem>(required_size));
        ptr = buff_vec.back().get_ptr();
        int id = buff_vec.size() - 1;
        return id;
    }
}

template int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
#ifdef INCLUDE_CUDA
template int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
#endif


memory_pipline<buf_choose_policy::naiv>* memory_faucet::mem_pipe_naiv = nullptr;
memory_pipline<buf_choose_policy::sorted_vec>* memory_faucet::mem_pipe_sorted = nullptr;
memory_pipline<buf_choose_policy::find_best_unsorted>* memory_faucet::mem_pipe_unsorted = nullptr;

// Each policy gets a lazily created singleton pipeline. Creation is not
// thread-safe; callers are expected to initialize from a single thread.
template< >
memory_pipline<buf_choose_policy::naiv>* memory_faucet::get_faucet()
{
    if(mem_pipe_naiv == nullptr)
    {
        mem_pipe_naiv = new memory_pipline<buf_choose_policy::naiv>();
    }
    return mem_pipe_naiv;
}

template< >
memory_pipline<buf_choose_policy::sorted_vec>* memory_faucet::get_faucet()
{
    if(mem_pipe_sorted == nullptr)
    {
        mem_pipe_sorted = new memory_pipline<buf_choose_policy::sorted_vec>();
    }
    return mem_pipe_sorted;
}

template< >
memory_pipline<buf_choose_policy::find_best_unsorted>* memory_faucet::get_faucet()
{
    if(mem_pipe_unsorted == nullptr)
    {
        mem_pipe_unsorted = new memory_pipline<buf_choose_policy::find_best_unsorted>();
    }
    return mem_pipe_unsorted;
}
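
// Usage sketch (policy and size are illustrative):
//   auto* pipe = memory_faucet::get_faucet<buf_choose_policy::naiv>();
//   void* p = nullptr;
//   int id = pipe->get_buffer<MemType::CPU>(1024, p);
//   pipe->set_available<MemType::CPU>(id); // return the slot to the pool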


template< MemType mem, buf_choose_policy choose_type >
memBuf<mem, choose_type>::memBuf(const size_t required_size)
{
    memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
    id = mem_pipe->template get_buffer<mem>(required_size, buf);
    size = required_size;
}

template< MemType mem, buf_choose_policy choose_type >
memBuf<mem, choose_type>::~memBuf()
{
    // Releasing is currently disabled, so the pooled slot is never marked
    // free again once a memBuf has claimed it.
    // memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
    // mem_pipe->template set_available<mem>(id);
}

template< MemType mem, buf_choose_policy choose_type >
void* memBuf<mem, choose_type>::ptr()
{
    return buf;
}

template< MemType mem, buf_choose_policy choose_type >
int memBuf<mem, choose_type>::get_size()
{
    return size;
}

template class memBuf<MemType::CPU, buf_choose_policy::naiv>;
template class memBuf<MemType::CPU, buf_choose_policy::sorted_vec>;
template class memBuf<MemType::CPU, buf_choose_policy::find_best_unsorted>;

#ifdef INCLUDE_CUDA
template class memBuf<MemType::GPU, buf_choose_policy::naiv>;
template class memBuf<MemType::GPU, buf_choose_policy::sorted_vec>;
template class memBuf<MemType::GPU, buf_choose_policy::find_best_unsorted>;
#endif
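
// Usage sketch (CPU pool, size illustrative):
//   {
//       memBuf<MemType::CPU, buf_choose_policy::naiv> scratch(1 << 20);
//       void* p = scratch.ptr(); // valid while `scratch` is alive
//       // ... use p as a 1 MiB workspace ...
//   } // NOTE: the slot is not handed back (see ~memBuf above)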