#include "memory-holder.h"
#include "MemoryProcessing.h"
#ifdef INCLUDE_CUDA
#include "MemoryProcessing.cuh"
#endif
#include <algorithm>
#include <functional>
#include <iterator>
#include <vector>
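// Definitions for the pooled-buffer machinery declared in memory-holder.h:
// the buffer<mem> wrapper, the memory_pipline pools (one strategy per
// buf_choose_policy), the memory_faucet singletons and the memBuf RAII handle.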
template< MemType mem >
buffer<mem>::buffer()
{
    ptr = nullptr;
    allocated_size = 0;
    scalar_size = 0;
    is_free = true;
    id = -1;
}
template< MemType mem >
buffer<mem>::~buffer()
{
    memproc::dealloc<mem>((void *&)ptr, allocated_size);
}
template< MemType mem >
bool buffer<mem>::is_available() const
{
    return is_free;
}
template< MemType mem >
void buffer<mem>::reallocate(const size_t required_size)
{
    memproc::realloc<mem>((void *&)ptr, allocated_size, required_size);
}
template< MemType mem >
buffer<mem>::buffer(const size_t required_size)
{
    ptr = nullptr;
    allocated_size = 0;
    reallocate(required_size);
    scalar_size = 0;
    is_free = false;
    id = -1;
}
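// Comparators used by the binary searches below: free_size_comparator(obj, n)
// is true for a free buffer whose size does not exceed n; size_comparator(n, obj)
// is true when n is strictly smaller than the buffer's allocated size.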
template< MemType mem >
bool free_size_comparator(const buffer<mem>& obj, const size_t required_size)
{
    if(obj.get_status() == false)
        return false;
    return obj.get_size() <= required_size;
}
template bool free_size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
#ifdef INCLUDE_CUDA
template bool free_size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
#endif
template< MemType mem >
bool size_comparator(const size_t required_size, const buffer<mem>& obj)
{
    return required_size < obj.get_size();
}
template bool size_comparator(const size_t required_size, const buffer<MemType::CPU>& obj);
#ifdef INCLUDE_CUDA
template bool size_comparator(const size_t required_size, const buffer<MemType::GPU>& obj);
#endif
template< MemType mem >
void* buffer<mem>::get_ptr() const
{
    return ptr;
}
template< MemType mem >
bool buffer<mem>::get_status() const
{
    return is_free;
}
template< MemType mem >
size_t buffer<mem>::get_size() const
{
    return allocated_size;
}
template< MemType mem >
int buffer<mem>::get_id() const
{
    return id;
}
template< MemType mem >
void buffer<mem>::set_allocated_size(const size_t required_size)
{
    allocated_size = required_size;
}
template< MemType mem >
buffer<mem>::buffer(const buffer<mem>& other)
{
    ptr = nullptr;
    allocated_size = 0;
    reallocate(other.get_size());
    is_free = other.get_status();
    id = other.get_id();
}
template< MemType mem >
buffer<mem>& buffer<mem>::operator=(buffer<mem>& other)
{
    if (this == &other)
        return *this;
    allocated_size = other.get_size();
    other.set_allocated_size(size_t(0));
    is_free = other.get_status();
    id = other.get_id();
    return *this;
}
template< MemType mem >
buffer<mem>& buffer<mem>::operator=(const buffer<mem>& other)
{
    if (this == &other)
        return *this;
    reallocate(other.get_size());
    is_free = other.get_status();
    id = other.get_id();
    return *this;
}
template< MemType mem >
void buffer<mem>::set_status(const bool status)
{
    is_free = status;
}
template< MemType mem >
void buffer<mem>::set_id(const int idx)
{
    id = idx;
}
template class buffer<MemType::CPU>;
#ifdef INCLUDE_CUDA
template class buffer<MemType::GPU>;

template<>
std::vector<buffer<MemType::GPU> >& memory_pipline_base::get_memtyped_vector()
{
    return gpu_buff_vec; // GPU-side buffer vector declared in memory-holder.h (member name assumed)
}
#endif
template< MemType mem >
void memory_pipline_base::set_available(const int id)
{
    // printf("Id %d, size = %d\n", id, get_memtyped_vector<mem>().size());
    get_memtyped_vector<mem>()[id].set_status(true);
}
template void memory_pipline_base::set_available<MemType::CPU>(const int id);
#ifdef INCLUDE_CUDA
template void memory_pipline_base::set_available<MemType::GPU>(const int id);
#endif
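// lower_bound-style binary search over the buffer vector, parameterised on a
// std::function comparator (same contract as std::lower_bound).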
template< MemType mem >
typename std::vector<buffer<mem>>::iterator get_lower_bound(typename std::vector<buffer<mem>>::iterator first,
    typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const buffer<mem>&, const size_t) >& comp)
{
    typename std::vector<buffer<mem>>::iterator it;
    typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
    count = std::distance(first, last);
    while (count > 0)
    {
        it = first;
        step = count / 2;
        std::advance(it, step);
        if (comp(*it, value))
        {
            first = ++it;
            count -= step + 1;
        }
        else
            count = step;
    }
    return first;
}
template typename std::vector<buffer<MemType::CPU>>::iterator get_lower_bound<MemType::CPU>(typename std::vector<buffer<MemType::CPU>>::iterator first,
    typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::CPU>&, const size_t)>& );
#ifdef INCLUDE_CUDA
template typename std::vector<buffer<MemType::GPU>>::iterator get_lower_bound<MemType::GPU>(typename std::vector<buffer<MemType::GPU>>::iterator first,
    typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::GPU>&, const size_t)>& );
#endif
#endif
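// upper_bound-style counterpart: finds the first position whose buffer is
// strictly larger than the requested size, so sorted insertion stays ordered.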
template< MemType mem >
typename std::vector<buffer<mem>>::iterator get_upper_bound(typename std::vector<buffer<mem>>::iterator first,
    typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<mem>&) >& comp)
{
    typename std::vector<buffer<mem>>::iterator it;
    typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
    count = std::distance(first, last);
    while (count > 0)
    {
        it = first;
        step = count / 2;
        std::advance(it, step);
        if (!comp(value, *it))
        {
            first = ++it;
            count -= step + 1;
        }
        else
            count = step;
    }
    return first;
}
template typename std::vector<buffer<MemType::CPU>>::iterator get_upper_bound<MemType::CPU>(typename std::vector<buffer<MemType::CPU>>::iterator first,
    typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::CPU>&)>& );
#ifdef INCLUDE_CUDA
template typename std::vector<buffer<MemType::GPU>>::iterator get_upper_bound<MemType::GPU>(typename std::vector<buffer<MemType::GPU>>::iterator first,
    typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::GPU>&)>& );
#endif
#endif
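// Generic first-fit strategy (used by the naiv policy): scan all buffers for a
// free one that is large enough, otherwise grow the pool at the back.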
template<buf_choose_policy choose_type>
template< MemType mem >
int memory_pipline<choose_type>::get_buffer(const size_t required_size, void *& ptr)
{
    const int allocated_buffer_n = buff_vec.size();
    for (int i = 0; i < allocated_buffer_n; i++)
    {
        const bool is_free = buff_vec[i].get_status();
        const size_t avail_size = buff_vec[i].get_size();
        if(is_free && required_size <= avail_size)
        {
            buff_vec[i].set_status(false); // mark the reused buffer as busy until set_available()
            ptr = buff_vec[i].get_ptr();
            return i;
        }
    }
    buff_vec.push_back(buffer<mem>(required_size));
    ptr = buff_vec.back().get_ptr();
    int id = buff_vec.size() - 1;
    return id;
}
template int memory_pipline<buf_choose_policy::naiv>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
#ifdef INCLUDE_CUDA
template int memory_pipline<buf_choose_policy::naiv>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
#endif
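// sorted_vec policy: buff_vec is kept ordered by size, so a newly allocated
// buffer is inserted at the position reported by get_upper_bound.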
template< MemType mem >
int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer(const size_t required_size, void *& ptr)
{
    const int allocated_buffer_n = buff_vec.size();
    for (int i = 0; i < allocated_buffer_n; i++)
    {
        const bool is_free = buff_vec[i].get_status();
        const size_t avail_size = buff_vec[i].get_size();
        if(is_free && required_size <= avail_size)
        {
            buff_vec[i].set_status(false); // mark the reused buffer as busy until set_available()
            ptr = buff_vec[i].get_ptr();
            return i;
        }
    }
    typename std::vector<buffer<mem>>::iterator buf_it;
    std::function<bool (const size_t, const buffer<mem>&) > comparator = size_comparator<mem>;
    buf_it = get_upper_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
    buf_it = buff_vec.insert(buf_it, buffer<mem>(required_size));
    ptr = buf_it->get_ptr();
    int id = std::distance(buff_vec.begin(), buf_it);
    return id;
}
template int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
#ifdef INCLUDE_CUDA
template int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
#endif
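// find_best_unsorted policy: probe buff_vec with get_lower_bound and
// free_size_comparator for a reusable buffer, otherwise allocate a new one at
// the back.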
template< MemType mem >
int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer(const size_t required_size, void *& ptr)
{
    std::function<bool (const buffer<mem>&, const size_t) > comparator = free_size_comparator<mem>;
    typename std::vector<buffer<mem>>::iterator available_buf_it = get_lower_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
    if(available_buf_it != buff_vec.end() && available_buf_it->get_status() && required_size <= available_buf_it->get_size())
    {
        available_buf_it->set_status(false); // reuse the free buffer found by the search
        ptr = available_buf_it->get_ptr();
        int id = std::distance(buff_vec.begin(), available_buf_it);
        return id;
    }
    buff_vec.push_back(buffer<mem>(required_size));
    ptr = buff_vec.back().get_ptr();
    int id = buff_vec.size() - 1;
    return id;
}
template int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
#ifdef INCLUDE_CUDA
template int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
#endif
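// memory_faucet: one lazily created memory_pipline singleton per policy.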
memory_pipline<buf_choose_policy::naiv>* memory_faucet::mem_pipe_naiv = nullptr;
memory_pipline<buf_choose_policy::sorted_vec>* memory_faucet::mem_pipe_sorted = nullptr;
memory_pipline<buf_choose_policy::find_best_unsorted>* memory_faucet::mem_pipe_unsorted = nullptr;
template<buf_choose_policy choose_type>
memory_pipline<choose_type>* memory_faucet::get_faucet()
{
    if(mem_pipe_naiv == nullptr)
    {
        mem_pipe_naiv = new memory_pipline<choose_type>();
    }
    return mem_pipe_naiv;
}
template memory_pipline<buf_choose_policy::naiv>* memory_faucet::get_faucet();
template< >
memory_pipline<buf_choose_policy::sorted_vec>* memory_faucet::get_faucet()
{
    if(mem_pipe_sorted == nullptr)
    {
        mem_pipe_sorted = new memory_pipline<buf_choose_policy::sorted_vec>();
    }
    return mem_pipe_sorted;
}
template< >
memory_pipline<buf_choose_policy::find_best_unsorted>* memory_faucet::get_faucet()
{
    if(mem_pipe_unsorted == nullptr)
    {
        mem_pipe_unsorted = new memory_pipline<buf_choose_policy::find_best_unsorted>();
    }
    return mem_pipe_unsorted;
}
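// memBuf: RAII handle over a pooled buffer; the constructor leases a buffer
// from the policy's pipeline and the destructor hands it back.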
template< MemType mem, buf_choose_policy choose_type >
memBuf<mem, choose_type>::memBuf(const size_t required_size)
{
    memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
    id = mem_pipe->template get_buffer<mem>(required_size, buf);
    // printf("mem %d, choose_type %d, kkddd %d\n", mem, choose_type, mem_pipe->template get_memtyped_vector<mem>().size());
}
template< MemType mem, buf_choose_policy choose_type >
memBuf<mem, choose_type>::~memBuf()
{
    memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
    // printf("mem %d, choose_type %d, kk %d\n", mem, choose_type, mem_pipe->template get_memtyped_vector<mem>().size());
    mem_pipe->template set_available<mem>(id);
}
template< MemType mem, buf_choose_policy choose_type >
void* memBuf<mem, choose_type>::ptr()
{
    return buf;
}
template< MemType mem, buf_choose_policy choose_type >
int memBuf<mem, choose_type>::get_size()
{
    // assumed implementation: report the allocated size of the leased pool buffer
    memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
    return (int)mem_pipe->template get_memtyped_vector<mem>()[id].get_size();
}
template class memBuf<MemType::CPU, buf_choose_policy::naiv>;
template class memBuf<MemType::CPU, buf_choose_policy::sorted_vec>;
template class memBuf<MemType::CPU, buf_choose_policy::find_best_unsorted>;
#ifdef INCLUDE_CUDA
template class memBuf<MemType::GPU, buf_choose_policy::naiv>;
template class memBuf<MemType::GPU, buf_choose_policy::sorted_vec>;
template class memBuf<MemType::GPU, buf_choose_policy::find_best_unsorted>;
#endif
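// Usage sketch (illustrative only, not part of the library): how a caller
// obtains pooled memory through the RAII handle. The element type `double`
// and the element count are arbitrary example values.
//
//     #include "memory-holder.h"
//
//     void example()
//     {
//         // lease space for 1024 doubles from the CPU pool
//         memBuf<MemType::CPU, buf_choose_policy::naiv> tmp(1024 * sizeof(double));
//         double* data = static_cast<double*>(tmp.ptr());
//         data[0] = 1.0;
//     }   // ~memBuf returns the buffer to the pool via set_available()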