#include "../include/MemoryProcessing.cuh"
#include <cuda.h>
#include <cuda_runtime_api.h>
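
// Free a GPU allocation whose size is tracked by the caller; resets allocated_size to 0.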
template<>
bool memproc::dealloc<MemType::GPU>(void *&array, size_t &allocated_size)
{
    if(allocated_size > 0)
    {
        cudaFree(array);
        allocated_size = 0;
    }
    return true;
}
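
// Free a GPU allocation when no size bookkeeping is needed.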
template<>
bool memproc::dealloc<MemType::GPU>(void *&array)
{
    cudaFree(array);
    return true;
}
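
// Grow a GPU allocation: free the old buffer, allocate new_size bytes and zero-fill them.
// Requests that fit within the current allocation leave the existing buffer untouched.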
template<>
bool memproc::realloc<MemType::GPU>(void *&array, size_t &allocated_size, const size_t new_size)
{
    if(new_size > allocated_size)
    {
        if(allocated_size > 0) dealloc<MemType::GPU>(array, allocated_size);
        allocated_size = new_size;
        cudaMalloc((void **)&array, new_size);
        cudaMemset(array, 0, new_size);
    }
    return true;
}
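
// Copy copy_elem_size bytes from host (CPU) memory into device (GPU) memory.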
template <>
bool memproc::memcopy<MemType::GPU, MemType::CPU>(void *dst, const void* src, const size_t copy_elem_size)
{
    cudaMemcpy(dst, src, copy_elem_size, cudaMemcpyHostToDevice);
    return true;
}
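
// Copy copy_elem_size bytes from device (GPU) memory into host (CPU) memory.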
template <>
bool memproc::memcopy<MemType::CPU, MemType::GPU>(void *dst, const void* src, const size_t copy_elem_size)
{
    cudaMemcpy(dst, src, copy_elem_size, cudaMemcpyDeviceToHost);
    return true;
}
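
// Copy copy_elem_size bytes between two device (GPU) buffers.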
template <>
bool memproc::memcopy<MemType::GPU, MemType::GPU>(void *dst, const void* src, const size_t copy_elem_size)