Commit 34b020b3 authored by 数学の武士
.
parent 5d76d6bb

CMakeLists.txt
cmake_minimum_required(VERSION 3.19)
option(INCLUDE_CUDA "Build in GPU (CUDA) mode" OFF)
project(memory-holder-test)
enable_language(CXX)
set(CMAKE_CXX_STANDARD 11)

if(INCLUDE_CUDA)
    if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
        message(FATAL_ERROR "
        CMake will not pass any architecture flags to the compiler
        because the CUDA architecture is not set. Specify one:
        -DCMAKE_CUDA_ARCHITECTURES=<N>.")
    endif()
    enable_language(CUDA)
    include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
    add_definitions(-DINCLUDE_CUDA)
endif()

add_subdirectory(Lib)

set(SOURCES
    main.cpp
)

add_executable(test ${SOURCES})
# memory-holder's PUBLIC include directories already propagate the
# memory_processing headers; ./Lib is added for memory-holder.h itself.
target_include_directories(test PUBLIC ${memory_processing_SOURCE_DIR}/include ./Lib/)
target_link_libraries(test memproc memory-holder)
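
# For reference, a typical configure-and-build sequence might look as
# follows; the build directory name and the architecture value are
# illustrative, not taken from the repository:
#
#   CPU-only build:
#     cmake -S . -B build
#     cmake --build build
#
#   CUDA build (replace 70 with your GPU's compute capability):
#     cmake -S . -B build -DINCLUDE_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=70
#     cmake --build build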

Lib/CMakeLists.txt
cmake_minimum_required(VERSION 3.19)
option(INCLUDE_CUDA "Build in GPU (CUDA) mode" OFF)
project(memory-holder)
enable_language(CXX)
set(CMAKE_CXX_STANDARD 11)

include(FetchContent)
FetchContent_Declare(
    memory_processing
    GIT_REPOSITORY http://tesla.parallel.ru/Lizzzka007/memory_processing.git
    GIT_TAG main
)
# Populates the dependency and defines memory_processing_SOURCE_DIR.
FetchContent_MakeAvailable(memory_processing)

if(INCLUDE_CUDA)
    if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
        message(FATAL_ERROR "
        CMake will not pass any architecture flags to the compiler
        because the CUDA architecture is not set. Specify one:
        -DCMAKE_CUDA_ARCHITECTURES=<N>.")
    endif()
    enable_language(CUDA)
    include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
    add_definitions(-DINCLUDE_CUDA)
endif()

set(SOURCES
    memory-holder.cpp
)
set(HEADER
    memory-holder.h
)

add_library(memory-holder ${SOURCES} ${HEADER})
target_include_directories(memory-holder PUBLIC ${memory_processing_SOURCE_DIR}/include)
target_link_libraries(memory-holder memproc)
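
# Since the dependency is fetched from a private Git server, it can be
# convenient to point FetchContent at a local checkout instead; CMake
# supports this via FETCHCONTENT_SOURCE_DIR_<UPPERCASE_NAME>. The path
# below is illustrative:
#
#   cmake -S . -B build -DFETCHCONTENT_SOURCE_DIR_MEMORY_PROCESSING=/path/to/memory_processing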

Lib/memory-holder.cpp
#include "memory-holder.h"
#include "MemoryProcessing.h"
#ifdef INCLUDE_CUDA
#include "MemoryProcessing.cuh"
#endif
#include <algorithm>
template< MemType mem >
buffer<mem>::buffer()
{
    ptr = nullptr;
    allocated_size = 0;
    scalar_size = 0;
    is_free = true;
    id = -1;
}

template< MemType mem >
buffer<mem>::buffer(buffer<mem>&& other) noexcept
{
    // Take ownership of the allocation and leave `other` empty so that its
    // destructor does not free the pointer we now hold. Without move
    // semantics, storing buffers in a std::vector would copy the raw
    // pointer and free it twice.
    ptr = other.ptr;
    allocated_size = other.allocated_size;
    scalar_size = other.scalar_size;
    is_free = other.is_free;
    id = other.id;
    other.ptr = nullptr;
    other.allocated_size = 0;
}

template< MemType mem >
buffer<mem>::~buffer()
{
    memproc::dealloc<mem>((void *&)ptr, allocated_size);
}

template< MemType mem >
bool buffer<mem>::is_available() const
{
    return is_free;
}

template< MemType mem >
void buffer<mem>::reallocate(const size_t required_size)
{
    memproc::realloc<mem>((void *&)ptr, allocated_size, required_size);
}

template< MemType mem >
buffer<mem>::buffer(const size_t required_size)
{
    // Start from an empty state: reallocate() reads both members, so they
    // must not be left uninitialized.
    ptr = nullptr;
    allocated_size = 0;
    reallocate(required_size);
    scalar_size = 0;
    is_free = false;
    id = -1;
}
// True when `obj` is free and already large enough for the request.
template< MemType mem >
bool size_comparator(const buffer<mem>& obj, const size_t required_size)
{
    if(obj.get_status() == false)
        return false;
    return obj.get_size() >= required_size;
}
template bool size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
template bool size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
template< MemType mem >
void* buffer<mem>::get_ptr() const
{
    return ptr;
}

template< MemType mem >
bool buffer<mem>::get_status() const
{
    return is_free;
}

template< MemType mem >
size_t buffer<mem>::get_size() const
{
    return allocated_size;
}
template< MemType mem >
buffer<mem>& buffer<mem>::operator=(buffer<mem>& other)
{
    if (this == &other)
        return *this;
    // Exchange resources between *this and other.
    std::swap(ptr, other.ptr);
    std::swap(allocated_size, other.allocated_size);
    std::swap(scalar_size, other.scalar_size);
    std::swap(is_free, other.is_free);
    std::swap(id, other.id);
    return *this;
}
template< MemType mem >
void buffer<mem>::set_status(const bool status)
{
    is_free = status;
}

template< MemType mem >
void buffer<mem>::set_id(const int idx)
{
    id = idx;
}

template class buffer<MemType::CPU>;
template class buffer<MemType::GPU>;
memory_pipline::memory_pipline()
{
    gpu_buff = std::vector<buffer<MemType::GPU> > ();
    cpu_buff = std::vector<buffer<MemType::CPU> > ();
}

memory_pipline::~memory_pipline()
{
    gpu_buff.clear();
    cpu_buff.clear();
}
// Primary template: every memory type except GPU maps to the CPU vector.
template< MemType mem >
std::vector<buffer<mem> >& memory_pipline::get_memtyped_vector()
{
    return cpu_buff;
}

// Full specialization for GPU buffers.
template<>
std::vector<buffer<MemType::GPU> >& memory_pipline::get_memtyped_vector()
{
    return gpu_buff;
}
template< MemType mem >
int memory_pipline::get_buffer(const size_t required_size, void *& ptr)
{
    std::vector<buffer<mem> >& buffers = get_memtyped_vector<mem>();
    // Linear scan for a free buffer that is already large enough; the
    // vector is not kept sorted, so an ordered search cannot be used.
    typename std::vector<buffer<mem> >::iterator available_buf_it =
        std::find_if(buffers.begin(), buffers.end(),
                     [required_size](const buffer<mem>& obj)
                     { return size_comparator<mem>(obj, required_size); });
    if(available_buf_it != buffers.end())
    {
        available_buf_it->set_status(false); // mark as taken before handing it out
        ptr = available_buf_it->get_ptr();
        int id = std::distance(buffers.begin(), available_buf_it);
        return id;
    }
    else
    {
        buffers.push_back(buffer<mem>(required_size));
        ptr = buffers.back().get_ptr();
        int id = buffers.size() - 1;
        return id;
    }
}
template int memory_pipline::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
template int memory_pipline::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
template< MemType mem >
void memory_pipline::set_available(const int id)
{
    get_memtyped_vector<mem>()[id].set_status(true);
}
template void memory_pipline::set_available<MemType::CPU>(const int id);
template void memory_pipline::set_available<MemType::GPU>(const int id);
memory_faucet::memory_faucet() {}

// Lazily constructed process-wide pool; it is intentionally never deleted
// and lives for the lifetime of the program.
memory_pipline* memory_faucet::get_faucet()
{
    if(mem_pipe == nullptr)
    {
        mem_pipe = new memory_pipline();
    }
    return mem_pipe;
}

memory_pipline* memory_faucet::mem_pipe = nullptr;
template< MemType mem >
memBuf<mem>::memBuf(const size_t required_size)
{
    // Borrow a buffer from the shared pool; get_buffer() fills `buf`
    // through its reference parameter and returns the buffer's index.
    memory_pipline* mem_pipe = memory_faucet::get_faucet();
    id = mem_pipe->get_buffer<mem>(required_size, buf);
    size = required_size;
}

template< MemType mem >
memBuf<mem>::~memBuf()
{
    // Return the buffer to the pool instead of freeing it.
    memory_pipline* mem_pipe = memory_faucet::get_faucet();
    mem_pipe->set_available<mem>(id);
}

template< MemType mem >
void* memBuf<mem>::ptr()
{
    return buf;
}

template< MemType mem >
size_t memBuf<mem>::get_size()
{
    return size;
}

template class memBuf<MemType::CPU>;
template class memBuf<MemType::GPU>;

Lib/memory-holder.h
#pragma once
#include <cstdlib>
#include <vector>
#include "TemplateParameters.h"

enum class buf_choose_policy
{
    naiv,
    sorted_vec,
    unsorted_vec
};
template< MemType mem >
class buffer
{
private:
    void *ptr;
    size_t allocated_size;
    size_t scalar_size;
    bool is_free;
    int id;
public:
    buffer();
    buffer(const size_t required_size);
    buffer(const buffer<mem>&) = delete;        // owns a raw pointer: copying would double-free
    buffer(buffer<mem>&& other) noexcept;       // transfers ownership of the allocation
    ~buffer();
    bool is_available() const;
    void* get_ptr() const;
    bool get_status() const;
    size_t get_size() const;
    buffer<mem>& operator=(buffer<mem>& other); // swaps resources with `other`
    void reallocate(const size_t required_size);
    void set_status(const bool status);
    void set_id(const int id);
};
class memory_pipline
{
private:
    std::vector<buffer<MemType::GPU> > gpu_buff;
    std::vector<buffer<MemType::CPU> > cpu_buff;
public:
    memory_pipline();
    ~memory_pipline();
    // Returns the index of the acquired buffer and sets `ptr` to its memory.
    template< MemType mem >
    int get_buffer(const size_t required_size, void *& ptr);
    template< MemType mem >
    std::vector<buffer<mem> >& get_memtyped_vector();
    template< MemType mem >
    void set_available(const int id);
};
class memory_faucet
{
private:
    memory_faucet();
    memory_faucet(const memory_faucet&) = delete;
    memory_faucet& operator=(const memory_faucet&) = delete;
    static memory_pipline* mem_pipe;
public:
    static memory_pipline* get_faucet();
};
template< MemType mem >
class memBuf
{
private:
    void* buf;
    int id;
    size_t size;
public:
    memBuf(const size_t required_size);
    ~memBuf();
    void* ptr();
    size_t get_size();
};

main.cpp
#include "memory-holder.h"
int main(void)
{
const size_t required_size = sizeof(float) * 100;
memBuf<MemType::CPU> Buf(required_size);
}
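
The test above exercises only a single allocation. Below is a minimal sketch of the pooling behaviour the classes are designed for, assuming memproc allocations behave like malloc/free; this extended test is not part of the repository:

#include "memory-holder.h"
#include <cstdio>

int main(void)
{
    void* first = nullptr;
    {
        memBuf<MemType::CPU> a(sizeof(float) * 100);
        first = a.ptr();                 // pool creates a fresh buffer
    }                                    // a's destructor marks the buffer free
    // A smaller request should be satisfied by the buffer freed above
    // rather than by a new allocation.
    memBuf<MemType::CPU> b(sizeof(float) * 50);
    std::printf("buffer reused: %s\n", b.ptr() == first ? "yes" : "no");
    return 0;
}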