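// sfx_flux.cpp
// Implementation of the Flux class template: a surface-flux computation driver that
// manages staging buffers across the memIn / memOut / RunMem memory spaces and
// dispatches to the CPU or CUDA kernels declared in sfx_flux_compute_func.h / .cuh.
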
#include <iostream>

#include "../includeCXX/sfx_flux.h"
#include "../includeCXX/sfx_flux_compute_func.h"
#ifdef INCLUDE_CUDA
    #include "../includeCU/sfx_flux_compute_func.cuh"
    #include "../includeCU/sfx_memory_processing.cuh"
#endif

#include "../includeCXX/sfx_memory_processing.h"

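// Default constructor: zero-initialize all closure constants and buffer bookkeeping.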
template<typename T, MemType memIn, MemType memOut, MemType RunMem >
Flux<T, memIn, memOut, RunMem>::Flux()
{
    kappa = 0, Pr_t_0_inv = 0, Pr_t_inf_inv = 0, 
    alpha_m = 0, alpha_h = 0, alpha_h_fix = 0, 
    beta_m = 0, beta_h = 0, 
    Rib_max = 0, Re_rough_min = 0, 
    B1_rough = 0, B2_rough = 0, 
    B_max_land = 0, B_max_ocean = 0, B_max_lake = 0,
    gamma_c = 0,  
    Re_visc_min = 0,
    Pr_m = 0, nu_air = 0, g = 0;

    grid_size = 0;

    ifAllocated = false;
    allocated_size = 0;
}

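// set_params: store the physical constants and the grid size and, whenever the run
// memory differs from the input or output memory space, allocate the staging buffers
// that set_data and compute_flux copy through.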
template<typename T, MemType memIn, MemType memOut, MemType RunMem >
void Flux<T, memIn, memOut, RunMem>::set_params(const int grid_size_, const T kappa_, const T Pr_t_0_inv_, const T Pr_t_inf_inv_, 
    const T alpha_m_, const T alpha_h_, const T alpha_h_fix_, 
    const T beta_m_, const T beta_h_, const T Rib_max_, const T Re_rough_min_, 
    const T B1_rough_, const T B2_rough_,
    const T B_max_land_, const T B_max_ocean_, const T B_max_lake_,
    const T gamma_c_, const T Re_visc_min_,
    const T Pr_m_, const T nu_air_, const T g_)
{
    kappa = kappa_, Pr_t_0_inv = Pr_t_0_inv_, Pr_t_inf_inv = Pr_t_inf_inv_, 
    alpha_m = alpha_m_, alpha_h = alpha_h_, alpha_h_fix = alpha_h_fix_, 
    beta_m = beta_m_, beta_h = beta_h_, 
    Rib_max = Rib_max_, Re_rough_min = Re_rough_min_, 
    B1_rough = B1_rough_, B2_rough = B2_rough_, 
    B_max_land = B_max_land_, B_max_ocean = B_max_ocean_, B_max_lake = B_max_lake_,
    gamma_c = gamma_c_,  
    Re_visc_min = Re_visc_min_,
    Pr_m = Pr_m_, nu_air = nu_air_, g = g_;

    grid_size = grid_size_;

    if(RunMem != memIn)
    {
        const size_t new_size = grid_size * sizeof(T);

        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(U), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(dT), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Tsemi), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(dQ), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(h), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(in_z0_m), allocated_size, new_size);

        ifAllocated = true;
    }
    if(RunMem != memOut)
    {
        const size_t new_size = grid_size * sizeof(T);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(zeta), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Rib), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Re), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Rib_conv_lim), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(z0_m), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(z0_t), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(B), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Cm), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Ct), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Km), allocated_size, new_size);
        allocated_size = 0;
        memproc::realloc<RunMem>((void *&)(Pr_t_inv), allocated_size, new_size);

        ifAllocated = true;
    }
}

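// Destructor: reset the parameters and release any staging buffers allocated in set_params.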
template<typename T, MemType memIn, MemType memOut, MemType RunMem >
Flux<T, memIn, memOut, RunMem>::~Flux()
{
    kappa = 0, Pr_t_0_inv = 0, Pr_t_inf_inv = 0, 
    alpha_m = 0, alpha_h = 0, alpha_h_fix = 0, 
    beta_m = 0, beta_h = 0, 
    Rib_max = 0, Re_rough_min = 0, 
    B1_rough = 0, B2_rough = 0, 
    B_max_land = 0, B_max_ocean = 0, B_max_lake = 0,
    gamma_c = 0,  
    Re_visc_min = 0,
    Pr_m = 0, nu_air = 0, g = 0;

    grid_size = 0;

    if(ifAllocated == true)
    {
        if(RunMem != memIn)
        {    
            memproc::dealloc<RunMem>((void*&)U);
            memproc::dealloc<RunMem>((void*&)dT);
            memproc::dealloc<RunMem>((void*&)Tsemi);
            memproc::dealloc<RunMem>((void*&)dQ);
            memproc::dealloc<RunMem>((void*&)h);
            memproc::dealloc<RunMem>((void*&)in_z0_m);
        }
        if(RunMem != memOut)
        {
            memproc::dealloc<RunMem>((void*&)zeta);
            memproc::dealloc<RunMem>((void*&)Rib);
            memproc::dealloc<RunMem>((void*&)Re);
            memproc::dealloc<RunMem>((void*&)Rib_conv_lim);
            memproc::dealloc<RunMem>((void*&)z0_m);
            memproc::dealloc<RunMem>((void*&)z0_t);
            memproc::dealloc<RunMem>((void*&)B);
            memproc::dealloc<RunMem>((void*&)Cm);
            memproc::dealloc<RunMem>((void*&)Ct);
            memproc::dealloc<RunMem>((void*&)Km);
            memproc::dealloc<RunMem>((void*&)Pr_t_inv);
        }

        ifAllocated = false;
        allocated_size = 0;
    }
}

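// set_data: bind the caller's arrays. When the run memory matches the input (output)
// memory space, the pointers are used directly; otherwise the inputs are copied into
// the staging buffers allocated in set_params and the outputs remain staged until
// compute_flux copies them back.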
template<typename T, MemType memIn, MemType memOut, MemType RunMem >
void Flux<T, memIn, memOut, RunMem>::set_data(T *zeta_, T *Rib_, T *Re_, T *B_, T *z0_m_, T *z0_t_, T *Rib_conv_lim_, T *Cm_, T *Ct_, T *Km_, T *Pr_t_inv_,
    T *U_, T *dT_, T *Tsemi_, T *dQ_, T *h_, T *in_z0_m_)
{
    if(RunMem == memIn)
    {
        U = U_;
        dT = dT_;
        Tsemi = Tsemi_;
        dQ = dQ_;
        h = h_;
        in_z0_m = in_z0_m_;
    }
    else
    {
        const size_t new_size = grid_size * sizeof(T);

        memproc::memcopy<RunMem, memIn>(U, U_, new_size);
        memproc::memcopy<RunMem, memIn>(dT, dT_, new_size);
        memproc::memcopy<RunMem, memIn>(Tsemi, Tsemi_, new_size);
        memproc::memcopy<RunMem, memIn>(dQ, dQ_, new_size);
        memproc::memcopy<RunMem, memIn>(h, h_, new_size);
        memproc::memcopy<RunMem, memIn>(in_z0_m, in_z0_m_, new_size);
    }

    if(RunMem == memOut)
    {
        zeta = zeta_;
        Rib = Rib_;
        Re = Re_;
        Rib_conv_lim = Rib_conv_lim_;
        z0_m = z0_m_;
        z0_t = z0_t_;
        B = B_;
        Cm = Cm_;
        Ct = Ct_;
        Km = Km_;
        Pr_t_inv = Pr_t_inv_;
    }
}

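// compute_flux: stage the input and output arrays via set_data, run the CPU kernel
// (or the GPU kernel when built with INCLUDE_CUDA), and copy the results back to the
// caller's arrays if they live in a different memory space than the run memory.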
template<typename T, MemType memIn, MemType memOut, MemType RunMem >
void Flux<T, memIn, memOut, RunMem>::compute_flux(T *zeta_, T *Rib_, T *Re_, T *B_, T *z0_m_, T *z0_t_, T *Rib_conv_lim_, T *Cm_, T *Ct_, T *Km_, T *Pr_t_inv_,
    T *U_, T *dT_, T *Tsemi_, T *dQ_, T *h_, T *in_z0_m_, 
    const int maxiters_charnock, const int maxiters_convection)
{
    set_data(zeta_, Rib_, Re_, B_, z0_m_, z0_t_, Rib_conv_lim_, Cm_, Ct_, Km_, Pr_t_inv_,
    U_, dT_, Tsemi_, dQ_, h_, in_z0_m_);

    if(RunMem == MemType::CPU) compute_flux_cpu(zeta, Rib, Re, B, z0_m, z0_t, Rib_conv_lim, Cm, Ct, Km, Pr_t_inv,
    U, dT, Tsemi, dQ, h, in_z0_m, 
    kappa, Pr_t_0_inv, Pr_t_inf_inv, 
    alpha_m, alpha_h, alpha_h_fix, 
    beta_m, beta_h, Rib_max, Re_rough_min, 
    B1_rough, B2_rough,
    B_max_land, B_max_ocean, B_max_lake,
    gamma_c, Re_visc_min,
    Pr_m, nu_air, g, 
    maxiters_charnock, maxiters_convection, 
    grid_size);

#ifdef INCLUDE_CUDA
    else compute_flux_gpu(zeta, Rib, Re, B, z0_m, z0_t, Rib_conv_lim, Cm, Ct, Km, Pr_t_inv,
    U, dT, Tsemi, dQ, h, in_z0_m, 
    kappa, Pr_t_0_inv, Pr_t_inf_inv, 
    alpha_m, alpha_h, alpha_h_fix, 
    beta_m, beta_h, Rib_max, Re_rough_min, 
    B1_rough, B2_rough,
    B_max_land, B_max_ocean, B_max_lake,
    gamma_c, Re_visc_min,
    Pr_m, nu_air, g, 
    maxiters_charnock, maxiters_convection, 
    grid_size);
#endif

    if(RunMem != memOut)
    {
        const size_t new_size = grid_size * sizeof(T);

        memproc::memcopy<memOut, RunMem>(zeta_, zeta, new_size);
        memproc::memcopy<memOut, RunMem>(Rib_, Rib, new_size);
        memproc::memcopy<memOut, RunMem>(Re_, Re, new_size);
        memproc::memcopy<memOut, RunMem>(Rib_conv_lim_, Rib_conv_lim, new_size);
        memproc::memcopy<memOut, RunMem>(z0_m_, z0_m, new_size);
        memproc::memcopy<memOut, RunMem>(z0_t_, z0_t, new_size);
        memproc::memcopy<memOut, RunMem>(B_, B, new_size);
        memproc::memcopy<memOut, RunMem>(Cm_, Cm, new_size);
        memproc::memcopy<memOut, RunMem>(Ct_, Ct, new_size);
        memproc::memcopy<memOut, RunMem>(Km_, Km, new_size);
        memproc::memcopy<memOut, RunMem>(Pr_t_inv_, Pr_t_inv, new_size);
    }
}

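// Explicit instantiations for the supported precision and memory-space combinations.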
template class Flux<float, MemType::CPU, MemType::CPU, MemType::CPU>;
template class Flux<double, MemType::CPU, MemType::CPU, MemType::CPU>;

#ifdef INCLUDE_CUDA
    template class Flux<float, MemType::GPU, MemType::GPU, MemType::GPU>;
    template class Flux<float, MemType::GPU, MemType::GPU, MemType::CPU>;
    template class Flux<float, MemType::GPU, MemType::CPU, MemType::GPU>;
    template class Flux<float, MemType::CPU, MemType::GPU, MemType::GPU>;
    template class Flux<float, MemType::CPU, MemType::CPU, MemType::GPU>;
    template class Flux<float, MemType::CPU, MemType::GPU, MemType::CPU>;
    template class Flux<float, MemType::GPU, MemType::CPU, MemType::CPU>;

    template class Flux<double, MemType::GPU, MemType::GPU, MemType::GPU>;
    template class Flux<double, MemType::GPU, MemType::GPU, MemType::CPU>;
    template class Flux<double, MemType::GPU, MemType::CPU, MemType::GPU>;
    template class Flux<double, MemType::CPU, MemType::GPU, MemType::GPU>;
    template class Flux<double, MemType::CPU, MemType::CPU, MemType::GPU>;
    template class Flux<double, MemType::CPU, MemType::GPU, MemType::CPU>;
    template class Flux<double, MemType::GPU, MemType::CPU, MemType::CPU>;
#endif
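
/*
    Usage sketch (illustrative only, not part of the library): run everything on the
    CPU with matching input and output memory spaces. The scalar arguments passed to
    set_params are the caller's closure constants; they are only named here, not
    given values.

    Flux<float, MemType::CPU, MemType::CPU, MemType::CPU> flux;
    flux.set_params(grid_size, kappa, Pr_t_0_inv, Pr_t_inf_inv,
                    alpha_m, alpha_h, alpha_h_fix, beta_m, beta_h,
                    Rib_max, Re_rough_min, B1_rough, B2_rough,
                    B_max_land, B_max_ocean, B_max_lake,
                    gamma_c, Re_visc_min, Pr_m, nu_air, g);
    flux.compute_flux(zeta, Rib, Re, B, z0_m, z0_t, Rib_conv_lim,
                      Cm, Ct, Km, Pr_t_inv,
                      U, dT, Tsemi, dQ, h, in_z0_m,
                      maxiters_charnock, maxiters_convection);
*/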