// MemoryProcessing.cu
// GPU (CUDA) specializations of the memory-management template helpers
// declared in MemoryProcessing.cuh.
#include "../include/MemoryProcessing.cuh"
#include <cuda.h>
#include <cuda_runtime_api.h>

// Releases device memory previously obtained through realloc<MemType::GPU>.
// On entry `array` is the device pointer and `allocated_size` its byte count.
// On exit the pointer is nulled and the size reset, so the pair can be safely
// reused with realloc. Returns false if cudaFree reported an error.
template<>
bool dealloc<MemType::GPU>(void *&array, size_t &allocated_size)
{
    if(allocated_size > 0)
    {
        const cudaError_t err = cudaFree(array);
        array = nullptr;        // avoid leaving a dangling device pointer
        allocated_size = 0;     // keep bookkeeping consistent even on failure
        return err == cudaSuccess;
    }

    // Nothing was allocated; trivially successful.
    return true;
}

// Grow-only reallocation of a device buffer: if `new_size` exceeds the current
// `allocated_size`, the old buffer (if any) is freed, a fresh zero-initialized
// buffer of `new_size` bytes is allocated, and `allocated_size` is updated.
// Existing contents are NOT preserved across a grow. Returns false (with
// array == nullptr and allocated_size == 0) if allocation fails, or false if
// the zero-fill fails; returns true otherwise.
template <>
bool realloc<MemType::GPU>(void *&array, size_t &allocated_size, const size_t new_size)
{
    if(new_size > allocated_size)
    {
        if(allocated_size > 0) dealloc<MemType::GPU>(array, allocated_size);

        // Update the size bookkeeping only AFTER a successful allocation;
        // otherwise a failed cudaMalloc would leave allocated_size claiming
        // memory that does not exist.
        if(cudaMalloc((void **)&array, new_size) != cudaSuccess)
        {
            array = nullptr;
            allocated_size = 0;
            return false;
        }
        allocated_size = new_size;

        if(cudaMemset(array, 0, new_size) != cudaSuccess)
            return false;
    }

    return true;
}

// Copies `copy_elem_size` bytes from host memory `src` to device memory `dst`
// (blocking host-to-device transfer). Returns false if cudaMemcpy fails,
// e.g. on an invalid pointer or a sticky prior CUDA error.
template <>
bool memcopy<MemType::GPU, MemType::CPU>(void *dst, const void* src, const size_t copy_elem_size)
{
    return cudaMemcpy(dst, src, copy_elem_size, cudaMemcpyHostToDevice) == cudaSuccess;
}

// Copies `copy_elem_size` bytes from device memory `src` to host memory `dst`
// (blocking device-to-host transfer). Returns false if cudaMemcpy fails.
template <>
bool memcopy<MemType::CPU, MemType::GPU>(void *dst, const void* src, const size_t copy_elem_size)
{
    return cudaMemcpy(dst, src, copy_elem_size, cudaMemcpyDeviceToHost) == cudaSuccess;
}

// Copies `copy_elem_size` bytes between two device buffers (device-to-device
// transfer). Returns false if cudaMemcpy fails.
template <>
bool memcopy<MemType::GPU, MemType::GPU>(void *dst, const void* src, const size_t copy_elem_size)
{
    return cudaMemcpy(dst, src, copy_elem_size, cudaMemcpyDeviceToDevice) == cudaSuccess;
}