diff --git a/Lib/memory-holder.cpp b/Lib/memory-holder.cpp
index 6d2bafb694029fb7674583ff910653162f85f096..bd833217b9aacf6a760e2af601ef36af5ccbb9b0 100644
--- a/Lib/memory-holder.cpp
+++ b/Lib/memory-holder.cpp
@@ -5,6 +5,7 @@
 #endif
 
 #include <algorithm>
+#include <cstdio>
 
 template< MemType mem >
 buffer<mem>::buffer()
@@ -37,13 +38,21 @@ void buffer<mem>::reallocate(const size_t required_size)
 template< MemType mem >
 buffer<mem>::buffer(const size_t required_size)
 {
+    allocated_size = 0;
+    // printf("required_size in constructor %ld\n", required_size);
     reallocate(required_size);
+    // printf("allocated_size in constructor %ld\n", allocated_size);
     scalar_size = 0;
     is_free = false;
 }
 
 template< MemType mem >
-bool size_comparator(const buffer<mem>& obj, const size_t required_size)
+bool free_size_comparator(const buffer<mem>& obj, const size_t required_size)
 {
     if(obj.get_status() == false)
         return false;
@@ -51,9 +60,20 @@ bool size_comparator(const buffer<mem>& obj, const size_t required_size)
     return obj.get_size() <= required_size;
 }
 
-template bool size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
+template bool free_size_comparator(const buffer<MemType::CPU>& obj, const size_t required_size);
+#ifdef INCLUDE_CUDA
+template bool free_size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
+#endif
+
+template< MemType mem >
+bool size_comparator(const size_t required_size, const buffer<mem>& obj)
+{
+    return required_size < obj.get_size();
+}
+
+template bool size_comparator(const size_t required_size, const buffer<MemType::CPU>& obj);
 #ifdef INCLUDE_CUDA
-template bool size_comparator(const buffer<MemType::GPU>& obj, const size_t required_size);
+template bool size_comparator(const size_t required_size, const buffer<MemType::GPU>& obj);
 #endif
 
 template< MemType mem >
@@ -74,17 +94,56 @@ size_t buffer<mem>::get_size() const
     return allocated_size;
 }
 
+template< MemType mem >
+int buffer<mem>::get_id() const
+{
+    return id;
+}
+
+template< MemType mem >
+void buffer<mem>::set_allocated_size(const size_t required_size) 
+{
+    allocated_size = required_size;
+}
+
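+// Copy constructor: allocates fresh storage of the same size and copies the
+// bookkeeping fields; the buffer contents themselves are not copied.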
+template< MemType mem >
+buffer<mem>::buffer(const buffer<mem>& other)
+{
+    allocated_size = 0;
+    // printf("Here in copy construct\n");
+    // printf("allocated_size %ld\n", other.get_size());
+    reallocate(other.get_size());
+    scalar_size = other.scalar_size;
+    is_free = other.get_status();
+    id = other.get_id();
+}
+
 template< MemType mem >
 buffer<mem>& buffer<mem>::operator=(buffer<mem>& other)
 {
+    // printf("Here in swap\n");
+
     if (this == &other)
         return *this;
- 
+
     std::swap(ptr, other.ptr);
-    std::swap(allocated_size, other.allocated_size); // exchange resources between *this and other
-    std::swap(scalar_size, other.scalar_size);
-    std::swap(is_free, other.is_free);
-    std::swap(id, other.id);
+    // Take over other's bookkeeping after the pointer swap; other is left
+    // holding the previous pointer with an allocated size of zero.
+    allocated_size = other.get_size();
+    other.set_allocated_size(size_t(0));
+    std::swap(scalar_size, other.scalar_size);
+    is_free = other.get_status();
+    id = other.get_id();
+    return *this;
+}
+
+template< MemType mem >
+buffer<mem>& buffer<mem>::operator=(const buffer<mem>& other)
+{
+    printf("Here in not swap\n");
+    if (this == &other)
+        return *this;
+ 
+    reallocate(other.get_size());
+    is_free = other.get_status();
+    id = other.get_id();
+
     return *this;
 }
 
@@ -105,7 +164,7 @@ template class buffer<MemType::CPU>;
 template class buffer<MemType::GPU>;
 #endif
 
-memory_pipline::memory_pipline()
+memory_pipline_base::memory_pipline_base()
 {
 #ifdef INCLUDE_CUDA
     gpu_buff = std::vector<buffer<MemType::GPU> > ();
@@ -113,7 +172,7 @@ memory_pipline::memory_pipline()
     cpu_buff = std::vector<buffer<MemType::CPU> > ();
 }
 
-memory_pipline::~memory_pipline()
+memory_pipline_base::~memory_pipline_base()
 {
 #ifdef INCLUDE_CUDA
     gpu_buff.clear();
@@ -122,94 +181,255 @@ memory_pipline::~memory_pipline()
 }
 
 template< MemType mem >
-std::vector<buffer<mem> >& memory_pipline::get_memtyped_vector()
+std::vector<buffer<mem> >& memory_pipline_base::get_memtyped_vector()
 {
     return cpu_buff;
 }
 
 #ifdef INCLUDE_CUDA
 template<>
-std::vector<buffer<MemType::GPU> >& memory_pipline::get_memtyped_vector()
+std::vector<buffer<MemType::GPU> >& memory_pipline_base::get_memtyped_vector()
 {
     return gpu_buff;
 }
 #endif
 
 template< MemType mem >
-int memory_pipline::get_buffer(const size_t required_size, void *& ptr)
+void memory_pipline_base::set_available(const int id)
+{
+    get_memtyped_vector<mem>()[id].set_status(true);
+}
+
+template void memory_pipline_base::set_available<MemType::CPU>(const int id);
+#ifdef INCLUDE_CUDA
+template void memory_pipline_base::set_available<MemType::GPU>(const int id);
+#endif
+
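+// Hand-rolled equivalent of std::lower_bound taking a std::function comparator:
+// returns the first buffer for which comp(buffer, value) is false. The range is
+// assumed to be partitioned with respect to comp.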
+template< MemType mem >
+typename std::vector<buffer<mem>>::iterator get_lower_bound(typename std::vector<buffer<mem>>::iterator first, 
+    typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const buffer<mem>&, const size_t) >& comp)
+{
+    typename std::vector<buffer<mem>>::iterator it;
+    typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
+    count = std::distance(first, last);
+ 
+    while (count > 0)
+    {
+        it = first;
+        step = count / 2;
+        std::advance(it, step);
+ 
+        if (comp(*it, value))
+        {
+            first = ++it;
+            count -= step + 1;
+        }
+        else
+            count = step;
+    }
+ 
+    return first;
+}
+
+template typename std::vector<buffer<MemType::CPU>>::iterator get_lower_bound<MemType::CPU>(typename std::vector<buffer<MemType::CPU>>::iterator first, 
+    typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::CPU>&, const size_t)>& );
+#ifdef INCLUDE_CUDA
+template typename std::vector<buffer<MemType::GPU>>::iterator get_lower_bound<MemType::GPU>(typename std::vector<buffer<MemType::GPU>>::iterator first, 
+    typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const buffer<MemType::GPU>&, const size_t)>& );
+#endif
+
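+// Hand-rolled equivalent of std::upper_bound taking a std::function comparator:
+// returns the first buffer for which comp(value, buffer) is true.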
+template< MemType mem >
+typename std::vector<buffer<mem>>::iterator get_upper_bound(typename std::vector<buffer<mem>>::iterator first, 
+    typename std::vector<buffer<mem>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<mem>&) >& comp)
 {
+    typename std::vector<buffer<mem>>::iterator it;
+    typename std::iterator_traits<typename std::vector<buffer<mem>>::iterator>::difference_type count, step;
+    count = std::distance(first, last);
+ 
+    while (count > 0)
+    {
+        it = first; 
+        step = count / 2;
+        std::advance(it, step);
+ 
+        if (!comp(value, *it))
+        {
+            first = ++it;
+            count -= step + 1;
+        } 
+        else
+            count = step;
+    }
+ 
+    return first;
+}
+
+template typename std::vector<buffer<MemType::CPU>>::iterator get_upper_bound<MemType::CPU>(typename std::vector<buffer<MemType::CPU>>::iterator first, 
+    typename std::vector<buffer<MemType::CPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::CPU>&)>& );
+#ifdef INCLUDE_CUDA
+template typename std::vector<buffer<MemType::GPU>>::iterator get_upper_bound<MemType::GPU>(typename std::vector<buffer<MemType::GPU>>::iterator first, 
+    typename std::vector<buffer<MemType::GPU>>::iterator last, const size_t value, std::function<bool (const size_t, const buffer<MemType::GPU>&)>& );
+#endif
+
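+// Naive policy: linearly scan the pool for the first free buffer that is large
+// enough; if none is found, allocate a new buffer at the back of the vector.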
+template<buf_choose_policy choose_type>
+template< MemType mem >
+int memory_pipline<choose_type>::get_buffer(const size_t required_size, void *& ptr)
+{
+    // Operate on the pool's vector by reference so newly created buffers persist.
+    std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
+    const int allocated_buffer_n = buff_vec.size();
+    for (int i = 0; i < allocated_buffer_n; i++)
+    {
+        const bool is_free = buff_vec[i].get_status();
+        const size_t avail_size = buff_vec[i].get_size();
+
+        if(is_free && required_size <= avail_size)
+        {
+            ptr = buff_vec[i].get_ptr();
+            return i;
+        }
+    }
+
+    buff_vec.push_back(buffer<mem>(required_size));
+    ptr = buff_vec.back().get_ptr();
+    int id = buff_vec.size() - 1;
+    return id;
+}
+
+template int memory_pipline<buf_choose_policy::naiv>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
+#ifdef INCLUDE_CUDA
+template int memory_pipline<buf_choose_policy::naiv>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
+#endif
+
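+// Sorted-vector policy: reuse a free buffer if the linear scan finds one that is
+// large enough; otherwise insert the new buffer at the position returned by
+// get_upper_bound so the vector stays ordered by allocated size.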
+template< MemType mem >
+int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer(const size_t required_size, void *& ptr)
+{
+    std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
+    const int allocated_buffer_n = buff_vec.size();
+    for (int i = 0; i < allocated_buffer_n; i++)
+    {
+        const bool is_free = buff_vec[i].get_status();
+        const size_t avail_size = buff_vec[i].get_size();
+
+        if(is_free && required_size <= avail_size)
+        {
+            ptr = buff_vec[i].get_ptr();
+            return i;
+        }
+    }
+
+    typename std::vector<buffer<mem>>::iterator buf_it;
+    std::function<bool (const size_t, const buffer<mem>&) > comparator = size_comparator<mem>;
+    buf_it = get_upper_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
+    buf_it = buff_vec.insert(buf_it, buffer<mem>(required_size));
+    ptr = buf_it->get_ptr();
+    int id = std::distance(buff_vec.begin(), buf_it);
+
+    return id;
+}
+
+template int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
+#ifdef INCLUDE_CUDA
+template int memory_pipline<buf_choose_policy::sorted_vec>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
+#endif
+
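+// find_best_unsorted policy: pick a candidate free buffer via get_lower_bound
+// with free_size_comparator; if no candidate is found, append a new buffer.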
+template< MemType mem >
+int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer(const size_t required_size, void *& ptr)
+{
+    std::vector<buffer<mem> >& buff_vec = get_memtyped_vector<mem>();
     typename std::vector<buffer<mem>>::iterator available_buf_it;
-    available_buf_it = std::lower_bound (get_memtyped_vector<mem>().begin(), get_memtyped_vector<mem>().end(), required_size, size_comparator<mem>);
-    if(available_buf_it != get_memtyped_vector<mem>().end())
+    std::function<bool (const buffer<mem>&, const size_t) > comparator = free_size_comparator<mem>;
+    available_buf_it = get_lower_bound<mem> (buff_vec.begin(), buff_vec.end(), required_size, comparator);
+    if(available_buf_it != buff_vec.end())
     {    
         ptr = available_buf_it->get_ptr();
-        int id = std::distance(get_memtyped_vector<mem>().begin(), available_buf_it);
+        int id = std::distance(buff_vec.begin(), available_buf_it);
         return id;
     }
     else
     {
-        get_memtyped_vector<mem>().push_back(buffer<mem>(required_size));
-        ptr = get_memtyped_vector<mem>().back().get_ptr();
-        int id = get_memtyped_vector<mem>().size() - 1;
+        buff_vec.push_back(buffer<mem>(required_size));
+        ptr = buff_vec.back().get_ptr();
+        int id = buff_vec.size() - 1;
         return id;
     }
 }
 
-template int memory_pipline::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
+template int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::CPU>(const size_t required_size, void *& ptr);
 #ifdef INCLUDE_CUDA
-template int memory_pipline::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
+template int memory_pipline<buf_choose_policy::find_best_unsorted>::get_buffer<MemType::GPU>(const size_t required_size, void *& ptr);
 #endif
 
-template< MemType mem >
-void memory_pipline::set_available(const int id)
+
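+// memory_faucet hands out one lazily created pipeline singleton per
+// buffer-choosing policy. The primary get_faucet template serves the default
+// (naiv) policy; the explicit specializations below serve the other two.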
+memory_pipline<buf_choose_policy::naiv>* memory_faucet::mem_pipe_naiv = nullptr;
+memory_pipline<buf_choose_policy::sorted_vec>* memory_faucet::mem_pipe_sorted = nullptr;
+memory_pipline<buf_choose_policy::find_best_unsorted>* memory_faucet::mem_pipe_unsorted = nullptr;
+
+template<buf_choose_policy choose_type>
+memory_pipline<choose_type>* memory_faucet::get_faucet()
 {
-     get_memtyped_vector<mem>()[id].set_status(true);
+    if(mem_pipe_naiv == nullptr)
+    {
+        mem_pipe_naiv = new memory_pipline<choose_type>();
+    }
+    return mem_pipe_naiv;
 }
 
-template void memory_pipline::set_available<MemType::CPU>(const int id);
-#ifdef INCLUDE_CUDA
-template void memory_pipline::set_available<MemType::GPU>(const int id);
-#endif
+template memory_pipline<buf_choose_policy::naiv>* memory_faucet::get_faucet<buf_choose_policy::naiv>();
 
-memory_pipline* memory_faucet::mem_pipe = nullptr;
+template< >
+memory_pipline<buf_choose_policy::sorted_vec>* memory_faucet::get_faucet()
+{
+    if(mem_pipe_sorted == nullptr)
+    {
+        mem_pipe_sorted = new memory_pipline<buf_choose_policy::sorted_vec>();
+    }
+    return mem_pipe_sorted;
+}
 
-memory_pipline* memory_faucet::get_faucet()
+template< >
+memory_pipline<buf_choose_policy::find_best_unsorted>* memory_faucet::get_faucet()
 {
-    if(mem_pipe == nullptr)
+    if(mem_pipe_unsorted == nullptr)
     {
-        mem_pipe = new memory_pipline();
+        mem_pipe_unsorted = new memory_pipline<buf_choose_policy::find_best_unsorted>();
     }
-    return mem_pipe;
+    return mem_pipe_unsorted;
 }
 
-template< MemType mem >
-memBuf<mem>::memBuf(const size_t required_size)
+
+template< MemType mem, buf_choose_policy choose_type >
+memBuf<mem, choose_type>::memBuf(const size_t required_size)
 {
-    memory_pipline* mem_pipe = memory_faucet::get_faucet();
-    id = mem_pipe->get_buffer<mem>(required_size, buf);
+    memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
+    id = mem_pipe->template get_buffer<mem>(required_size, buf);
     size = required_size;
 }
 
-template< MemType mem >
-memBuf<mem>::~memBuf()
+template< MemType mem, buf_choose_policy choose_type >
+memBuf<mem, choose_type>::~memBuf()
 {
-    memory_pipline* mem_pipe = memory_faucet::get_faucet();
-    mem_pipe->set_available<mem>(id);
+    // memory_pipline<choose_type>* mem_pipe = memory_faucet::get_faucet<choose_type>();
+    // mem_pipe->template set_available<mem>(id);
 }
 
-template< MemType mem >
-void* memBuf<mem>::ptr()
+template< MemType mem, buf_choose_policy choose_type >
+void* memBuf<mem, choose_type>::ptr()
 {
     return buf;
 }
 
-template< MemType mem >
-int memBuf<mem>::get_size()
+template< MemType mem, buf_choose_policy choose_type >
+int memBuf<mem, choose_type>::get_size()
 {
     return size;
 }
 
-template class memBuf<MemType::CPU>;
+template class memBuf<MemType::CPU, buf_choose_policy::naiv>;
+template class memBuf<MemType::CPU, buf_choose_policy::sorted_vec>;
+template class memBuf<MemType::CPU, buf_choose_policy::find_best_unsorted>;
+
 #ifdef INCLUDE_CUDA
-template class memBuf<MemType::GPU>;
+template class memBuf<MemType::GPU, buf_choose_policy::naiv>;
+template class memBuf<MemType::GPU, buf_choose_policy::sorted_vec>;
+template class memBuf<MemType::GPU, buf_choose_policy::find_best_unsorted>;
 #endif
diff --git a/Lib/memory-holder.h b/Lib/memory-holder.h
index 5c2b0a14b28561b2a0949de52f6eb7150316da55..9ec600356be5c1aec6069dd72363dd6f0dd4f8b3 100644
--- a/Lib/memory-holder.h
+++ b/Lib/memory-holder.h
@@ -1,13 +1,15 @@
 #pragma once
 #include <cstdlib>
 #include <vector>
+#include <functional>
+
 #include "TemplateParameters.h"
 
 enum class buf_choose_policy
 {
     naiv,
     sorted_vec,
-    unsorted_vec
+    find_best_unsorted
 };
 
 template< MemType mem >
@@ -21,6 +23,7 @@ private:
     int id;
 
 public:
+    buffer(const buffer<mem>& other);
     buffer();
     ~buffer();
     buffer(const size_t required_size);
@@ -29,31 +32,79 @@ public:
     void* get_ptr();
     bool get_status() const;
     size_t get_size() const;
+    int get_id() const;
     buffer<mem>& operator=(buffer<mem>& other);
+    buffer<mem>& operator=(const buffer<mem>& other);
     void reallocate(const size_t required_size);
+    void set_allocated_size(const size_t required_size);
     void set_status(const bool status);
     void set_id(const int id);
 };
 
-class memory_pipline
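+// Policy-independent part of the pool: owns the per-memory-type buffer vectors
+// and the operations that do not depend on the buffer-choosing policy.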
+class memory_pipline_base
 {
-private:
+protected:
 #ifdef INCLUDE_CUDA
     std::vector<buffer<MemType::GPU> > gpu_buff;
 #endif
     std::vector<buffer<MemType::CPU> > cpu_buff;
+
+public:
+    memory_pipline_base();
+    ~memory_pipline_base();
+
+    template< MemType mem >
+    void set_available(const int id);
+
+    template< MemType mem >
+    std::vector<buffer<mem> >& get_memtyped_vector();
+};
+
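+// memory_pipline is parameterized by the buffer-choosing policy: the primary
+// template implements the naive first-fit search, and the specializations
+// below provide the sorted_vec and find_best_unsorted variants of get_buffer.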
+template<buf_choose_policy choose_type = buf_choose_policy::naiv>
+class memory_pipline: public memory_pipline_base 
+{
+private:
+#ifdef INCLUDE_CUDA
+    using memory_pipline_base::gpu_buff;
+#endif
+    using memory_pipline_base::cpu_buff;
 public:
-    memory_pipline();
-    ~memory_pipline();
+    memory_pipline() : memory_pipline_base() {}
+    ~memory_pipline() = default;
 
     template< MemType mem >
     int get_buffer(const size_t required_size, void *& ptr);
+};
+
+template<>
+class memory_pipline<buf_choose_policy::sorted_vec>: public memory_pipline_base 
+{
+private:
+#ifdef INCLUDE_CUDA
+    using memory_pipline_base::gpu_buff;
+#endif
+    using memory_pipline_base::cpu_buff;
+public:
+    memory_pipline() : memory_pipline_base() {}
+    ~memory_pipline() = default;
 
     template< MemType mem >
-    std::vector<buffer<mem> >& get_memtyped_vector();
+    int get_buffer(const size_t required_size, void *& ptr);
+};
+
+template<>
+class memory_pipline<buf_choose_policy::find_best_unsorted>: public memory_pipline_base 
+{
+private:
+#ifdef INCLUDE_CUDA
+    using memory_pipline_base::gpu_buff;
+#endif
+    using memory_pipline_base::cpu_buff;
+public:
+    memory_pipline() : memory_pipline_base() {}
+    ~memory_pipline() = default;
 
     template< MemType mem >
-    void set_available(const int id);
+    int get_buffer(const size_t required_size, void *& ptr);
 };
 
 class memory_faucet
@@ -63,13 +114,17 @@ private:
     memory_faucet(const memory_faucet&) = delete;
     memory_faucet& operator=(const memory_faucet&) = delete;
 
-    static memory_pipline* mem_pipe;
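+    // One lazily created pipeline instance per buffer-choosing policy.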
+    static memory_pipline<buf_choose_policy::naiv>* mem_pipe_naiv;
+    static memory_pipline<buf_choose_policy::sorted_vec>* mem_pipe_sorted;
+    static memory_pipline<buf_choose_policy::find_best_unsorted>* mem_pipe_unsorted;
 
 public:
-    static memory_pipline* get_faucet();
+
+    template<buf_choose_policy choose_type = buf_choose_policy::naiv>
+    static memory_pipline<choose_type>* get_faucet();
 };
 
-template< MemType mem >
+template< MemType mem, buf_choose_policy choose_type = buf_choose_policy::naiv >
 class memBuf
 {
 private:
diff --git a/main.cpp b/main.cpp
index fb050559f7823841cd8522f6095dbf40dc9cdef4..3f6e94ab8c4b5ac935ab46408850f051ce46249c 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1,13 +1,14 @@
 #include "memory-holder.h"
-
+#include <cstdio>
+
 int main(void)
 {
     const size_t required_size = sizeof(float) * 100;
-    memBuf<MemType::CPU> Buf(required_size);
-    memBuf<MemType::GPU> Buf_gpu(required_size);
+    // printf("required_size %ld\n", required_size);
+    memBuf<MemType::CPU, buf_choose_policy::find_best_unsorted> Buf(required_size);
+    // memBuf<MemType::GPU> Buf_gpu(required_size);
 
     float* ptr = static_cast<float*>(Buf.ptr());
-    float* ptr_gpu = static_cast<float*>(Buf_gpu.ptr());
+    // float* ptr_gpu = static_cast<float*>(Buf_gpu.ptr());
 
     for (int i = 0; i < 100; i++)
     {