// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;

/** \internal \returns b if a<=0, and returns a otherwise. */
inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}
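// For instance, manage_caching_sizes_helper(-1, 32*1024) returns 32*1024 (the fallback),
// while manage_caching_sizes_helper(48*1024, 32*1024) returns 48*1024.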

#if EIGEN_ARCH_i386_OR_x86_64
const std::ptrdiff_t defaultL1CacheSize = 32*1024;
const std::ptrdiff_t defaultL2CacheSize = 256*1024;
const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
#else
const std::ptrdiff_t defaultL1CacheSize = 16*1024;
const std::ptrdiff_t defaultL2CacheSize = 512*1024;
const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif

/** \internal */
struct CacheSizes {
  CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
    int l1CacheSize, l2CacheSize, l3CacheSize;
    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
    m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
    m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
    m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
  }

  std::ptrdiff_t m_l1;
  std::ptrdiff_t m_l2;
  std::ptrdiff_t m_l3;
};


/** \internal */
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
  static CacheSizes m_cacheSizes;

  if(action==SetAction)
  {
    // set the cpu cache sizes (in bytes) from which all blocking sizes are derived
    eigen_internal_assert(l1!=0 && l2!=0);
    m_cacheSizes.m_l1 = *l1;
    m_cacheSizes.m_l2 = *l2;
    m_cacheSizes.m_l3 = *l3;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_cacheSizes.m_l1;
    *l2 = m_cacheSizes.m_l2;
    *l3 = m_cacheSizes.m_l3;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
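
// A minimal usage sketch of the Get/Set protocol above (illustrative only; user code
// normally goes through setCpuCacheSizes(), cf. the \sa references below):
//
//   std::ptrdiff_t l1, l2, l3;
//   manage_caching_sizes(GetAction, &l1, &l2, &l3);  // read the current sizes
//   l1 = 48*1024;                                    // hypothetical override
//   manage_caching_sizes(SetAction, &l1, &l2, &l3);  // install the new sizes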

/* Helper for computeProductBlockingSizes.
 *
 * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
 * this function computes the blocking size parameters along the respective dimensions
 * for matrix products and related algorithms. The blocking sizes depend on various
 * parameters:
 * - the L1 and L2 cache sizes,
 * - the register level blocking sizes defined by gebp_traits,
 * - the number of scalars that fit into a packet (when vectorization is enabled).
 *
 * \sa setCpuCacheSizes */

template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  // Explanations:
  // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
  // kc x nc blocks B' on the rhs. B' has to fit into the L2/L3 cache. Moreover, A' is processed
  // in small mr x kc horizontal panels, where mr is the register-level blocking size along
  // the m dimension. Each such small horizontal panel has to stay within the L1 cache.
  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);

  if (num_threads > 1) {
    typedef typename Traits::ResScalar ResScalar;
    enum {
      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
      kr = 8,
      mr = Traits::mr,
      nr = Traits::nr
    };
    // Increasing k gives us more time to prefetch the content of the "C"
    // registers. However once the latency is hidden there is no point in
    // increasing the value of k, so we'll cap it at 320 (value determined
    // experimentally).
    const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320);
    if (k_cache < k) {
      k = k_cache - (k_cache % kr);
      eigen_internal_assert(k > 0);
    }

    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
    const Index n_per_thread = numext::div_ceil(n, num_threads);
    if (n_cache <= n_per_thread) {
      // Don't exceed the capacity of the l2 cache.
      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
      n = n_cache - (n_cache % nr);
      eigen_internal_assert(n > 0);
    } else {
      n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));
    }

    if (l3 > l2) {
      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
      const Index m_per_thread = numext::div_ceil(m, num_threads);
      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
        m = m_cache - (m_cache % mr);
        eigen_internal_assert(m > 0);
      } else {
        m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));
      }
    }
  }
  else {
    // In unit tests we do not want to use extra large matrices,
    // so we reduce the cache sizes to check that the blocking strategy is not flawed.
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    l1 = 9*1024;
    l2 = 32*1024;
    l3 = 512*1024;
#endif

    // Early return for small problems because the computations below are time consuming.
    // Perhaps it would make more sense to consider k*n*m??
    // Note that for very tiny problems, this function should be bypassed anyway
    // because we use the coefficient-based implementation for them.
    if((numext::maxi)(k,(numext::maxi)(m,n))<48)
      return;

    typedef typename Traits::ResScalar ResScalar;
    enum {
      k_peeling = 8,
      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
    };

    // ---- 1st level of blocking on L1, yields kc ----

    // Blocking on the third dimension (i.e., k) is chosen so that a horizontal panel
    // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fit within the L1 cache.
    // We also include a register-level block of the result (mr x nr).
    // (In an ideal world only the lhs panel would stay in L1.)
    // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
    const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
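    // Worked example (illustrative numbers only, assuming float scalars with mr=8, nr=4,
    // KcFactor=1 and a 32KB L1): k_sub = 8*4*4 = 128, k_div = 8*4 + 4*4 = 48, hence
    // max_kc = ((32768-128)/48) & ~7 = 680, indeed a multiple of the peeling factor 8.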
    const Index old_k = k;
    if(k>max_kc)
    {
      // We are really blocking on the third dimension:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the result.
      k = (k%max_kc)==0 ? max_kc
                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));

      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
    }
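    // E.g., continuing the numbers above with old_k = 1000 > max_kc = 680: k%max_kc = 320,
    // so k = 680 - 8*((680-1-320)/(8*(1000/680+1))) = 680 - 8*22 = 504, and both k=504
    // and max_kc=680 yield a single sweep over the result (1000/504 == 1000/680 == 1), as asserted.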

    // ---- 2nd level of blocking on max(L2,L3), yields nc ----

    // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
    //      actual_l2 = max(l2, l3/nb_core_sharing_l3)
    // The number below is quite conservative: it is better to underestimate the cache size rather than to overestimate it.
    // For instance, it corresponds to 6MB of L3 shared among 4 cores.
    #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    const Index actual_l2 = l3;
    #else
    const Index actual_l2 = 1572864; // == 1.5 MB
    #endif

    // Here, nc is chosen such that a block of kc x nc of the rhs fits within half of the L2.
    // The second half is implicitly reserved for accessing the result and lhs coefficients.
    // When k<max_kc, nc could grow arbitrarily. In practice, it seems fruitful
    // to limit this growth: we bound nc's growth to a factor of 1.5.
    // However, if the entire lhs block fits within L1, then we are not going to block on the rows at all,
    // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space.
    Index max_nc;
    const Index lhs_bytes = m * k * sizeof(LhsScalar);
    const Index remaining_l1 = l1 - k_sub - lhs_bytes;
    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
    {
      // L1 blocking
      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
    }
    else
    {
      // L2 blocking
      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
    }
    // WARNING Below, we assume that Traits::nr is a power of two.
    Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
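    // Continuing the illustrative numbers (float, k = 504, max_kc = 680, actual_l2 = 1.5MB),
    // the L2 path gives max_nc = (3*1572864)/(2*2*680*4) = 433, and then
    // nc = min(1572864/(2*504*4), 433) & ~3 = min(390, 433) & ~3 = 388.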
    if(n>nc)
    {
      // We are really blocking over the columns:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the packed lhs.
      //    Here we allow one more sweep if this gives us a perfect match, thus the commented "-1"
      n = (n%nc)==0 ? nc
                    : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));
    }
    else if(old_k==k)
    {
      // So far, no blocking at all, i.e., kc==k, and nc==n.
      // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2
      // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.
      Index problem_size = k*n*sizeof(LhsScalar);
      Index actual_lm = actual_l2;
      Index max_mc = m;
      if(problem_size<=1024)
      {
        // problem is small enough to be kept in L1
        // Let's choose m such that the lhs's block fits in 1/3 of L1
        actual_lm = l1;
      }
      else if(l3!=0 && problem_size<=32768)
      {
        // we have both L2 and L3, and the problem is small enough to be kept in L2
        // Let's choose m such that the lhs's block fits in 1/3 of L2
        actual_lm = l2;
        max_mc = (numext::mini<Index>)(576,max_mc);
      }
      Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
      if (mc > Traits::mr) mc -= mc % Traits::mr;
      else if (mc==0) return;
      m = (m%mc)==0 ? mc
                    : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));
    }
  }
}

template <typename Index>
inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}

/** \brief Computes the blocking parameters for an m x k times k x n matrix product
  *
  * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
  * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
  * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
  *
  * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
  * this function computes the blocking size parameters along the respective dimensions
  * for matrix products and related algorithms.
  *
  * The blocking size parameters may be evaluated:
  *   - either by a heuristic based on cache sizes;
  *   - or using fixed prescribed values (for testing purposes).
  *
  * \sa setCpuCacheSizes */

template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
  }
}

template<typename LhsScalar, typename RhsScalar, typename Index>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
}
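
// A minimal usage sketch (illustrative; k/m/n are updated in place):
//
//   Index k = depth, m = rows, n = cols;               // hypothetical problem sizes
//   computeProductBlockingSizes<float,float>(k, m, n); // single-threaded blocking
//   // k, m, n now hold the blocking sizes kc, mc, nc along the respective dimensions.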

#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  // FIXME (a bit overkill maybe ?)

  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
//   #define CJMADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
#endif

/* Vectorization logic
 *  real*real: unpack rhs to constant packets, ...
 * 
 *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
 *          storing each res packet into two packets (2x2),
 *          at the end combine them: swap the second and addsub them 
 *  cf*cf : same but with 2x4 blocks
 *  cplx*real : unpack rhs to constant packets, ...
 *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
 */
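/* For reference, the cd*cd/cf*cf recombination above follows from expanding the product:
 *   (a_r + i*a_i)*(b_r + i*b_i) = (a_r*b_r - a_i*b_i) + i*(a_r*b_i + a_i*b_r)
 * The kernel accumulates a*(b_r,b_r) and a*(b_i,b_i) separately (see DoublePacket below);
 * the final swap/add-sub of the second packet produces the real and imaginary parts above.
 */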
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
    
    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction must be 1 or 4
    nr = 4,

    // register block size along the M direction (currently, this one cannot be modified)
    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    // See bug 992: if the scalar type is not vectorizable but EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
    // then using 3*LhsPacketSize triggers non-implemented paths in syrk.
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif
    
    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;
  
  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }
  
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }
  
//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }
  
  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }
  
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }

  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
    conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj;
    // It would be a lot cleaner to call pmadd all the time. Unfortunately, if we
    // let gcc allocate the register in which to store the result of the pmul
    // (in the case where there is no FMA), gcc fails to figure out how to avoid
    // spilling registers.
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = cj.pmadd(a,b,c);
#else
    tmp = b; tmp = cj.pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }
  
  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }

};

template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
    
    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }
  
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }
  
//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};

template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};
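
// A DoublePacket accumulates the two halves of a complex product: 'first' collects the
// products against the broadcast real parts of the rhs, 'second' those against the
// broadcast imaginary parts; gebp_traits::acc() recombines them into a complex packet.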

template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

template<typename Packet>
const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a)
{
  return a;
}

template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
// template<typename Packet>
// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
// {
//   DoublePacket<Packet> res;
//   res.first  = padd(a.first, b.first);
//   res.second = padd(a.second,b.second);
//   return res;
// }

template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;
  
  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };
  
  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;
  
  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first   = pset1<RealPacket>(RealScalar(0));
    p.second  = pset1<RealPacket>(RealScalar(0));
  }

  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(real(*b));
    dest.second = pset1<RealPacket>(imag(*b));
  }
  
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }
  
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }
  
  // Vectorized path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }
  
  // Scalar path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // nothing special here
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
  {
    c.first   = padd(pmul(a,b.first), c.first);
    c.second  = padd(pmul(a,b.second),c.second);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }
  
  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
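  // In the vectorized acc() below, c.first = a*(b_r,b_r) and c.second = a*(b_i,b_i).
  // Viewing a real packet (x,y) as the complex number x+i*y, the no-conjugation case is:
  //   pcplxflip(pconj(c.second)) = pcplxflip((a_r*b_i, -a_i*b_i)) = (-a_i*b_i, a_r*b_i)
  //   c.first + the above        = (a_r*b_r - a_i*b_i, a_i*b_r + a_r*b_i) = a*b
  // The other three cases simply move the conjugations around.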
  
  EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble c
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }
    
    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};

template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef RealScalar  LhsScalar;
  typedef Scalar      RhsScalar;
  typedef Scalar      ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
    
    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }
  
  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }
  
//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     // FIXME not sure that's the best way to implement it!
//     b0 = pload1<RhsPacket>(b+0);
//     b1 = pload1<RhsPacket>(b+1);
//   }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }
  
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a,b.v,c.v);
#else
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
#endif
    
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};

/* optimized GEneral packed Block * packed Panel product kernel
 *
 * Mixing type logic: C += A * B
 *  |  A  |  B  | comments
 *  |real |cplx | no vectorization yet, would require to pack A with duplication
 *  |cplx |real | easy vectorization
 */
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
  typedef typename SwappedTraits::ResScalar SResScalar;
  typedef typename SwappedTraits::LhsPacket SLhsPacket;
  typedef typename SwappedTraits::RhsPacket SRhsPacket;
  typedef typename SwappedTraits::ResPacket SResPacket;
  typedef typename SwappedTraits::AccPacket SAccPacket;

  typedef typename DataMapper::LinearMapper LinearMapper;

  enum {
    Vectorizable  = Traits::Vectorizable,
    LhsProgress   = Traits::LhsProgress,
    RhsProgress   = Traits::RhsProgress,
    ResPacketSize = Traits::ResPacketSize
  };

  EIGEN_DONT_INLINE
  void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
                  Index rows, Index depth, Index cols, ResScalar alpha,
                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
};

template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
  ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
               Index rows, Index depth, Index cols, ResScalar alpha,
               Index strideA, Index strideB, Index offsetA, Index offsetB)
  {
    Traits traits;
    SwappedTraits straits;
    
    if(strideA==-1) strideA = depth;
    if(strideB==-1) strideB = depth;
    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
    Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
    const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
    const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
    const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
    enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)
    const Index peeled_kc  = depth & ~(pk-1);
    const Index prefetch_res_offset = 32/sizeof(ResScalar);    
//     const Index depth2     = depth & ~1;

    //---------- Process 3 * LhsProgress rows at once ----------
    // This corresponds to 3*LhsProgress x nr register blocks.
    // Usually makes sense only with FMA
    if(mr>=3*Traits::LhsProgress)
    {
      // Here, the general idea is to loop over each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth)
      // and over each largest micro vertical panel of the rhs (depth * nr).
      // Blocking sizes, i.e., 'depth', have been computed so that the micro horizontal panel of the lhs fits in L1.
      // However, if depth is too small, we can extend the number of rows of these horizontal panels.
      // This actual number of rows is computed as follows:
      const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.
      // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size
      // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess),
      // or because we are testing specific blocking sizes.
      const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));
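      // E.g. (illustrative: float on AVX with FMA, so LhsProgress=8, mr=24, nr=4) with
      // depth=100: (32768 - 4*24*4 - 100*4*4) / (100*4*24) = 30784/9600 = 3, and the
      // panel is extended to 3*(3*LhsProgress) = 72 rows instead of the minimal 24.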
      for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)
      {
        const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);
        for(Index j2=0; j2<packet_cols4; j2+=nr)
        {
          for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
          {
          
          // We selected a 3*Traits::LhsProgress x nr micro block of res which is entirely
          // stored into 3 x nr registers.
          
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C1, C2,  C3,
                    C4, C5, C6,  C7,
                    C8, C9, C10, C11;
          traits.initAcc(C0);  traits.initAcc(C1);  traits.initAcc(C2);  traits.initAcc(C3);
          traits.initAcc(C4);  traits.initAcc(C5);  traits.initAcc(C6);  traits.initAcc(C7);
          traits.initAcc(C8);  traits.initAcc(C9);  traits.initAcc(C10); traits.initAcc(C11);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(0);
          r1.prefetch(0);
          r2.prefetch(0);
          r3.prefetch(0);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0, A1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
            RhsPacket B_0, T0;
            LhsPacket A2;

#define EIGEN_GEBP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              internal::prefetch(blA+(3*K+16)*LhsProgress); \
              if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \
              traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, T0); \
              traits.madd(A2, B_0, C8, B_0); \
              traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C1, T0); \
              traits.madd(A1, B_0, C5, T0); \
              traits.madd(A2, B_0, C9, B_0); \
              traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C2,  T0); \
              traits.madd(A1, B_0, C6,  T0); \
              traits.madd(A2, B_0, C10, B_0); \
              traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C3 , T0); \
              traits.madd(A1, B_0, C7,  T0); \
              traits.madd(A2, B_0, C11, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
            } while(false)

            internal::prefetch(blB);
            EIGEN_GEBP_ONESTEP(0);
            EIGEN_GEBP_ONESTEP(1);
            EIGEN_GEBP_ONESTEP(2);
            EIGEN_GEBP_ONESTEP(3);
            EIGEN_GEBP_ONESTEP(4);
            EIGEN_GEBP_ONESTEP(5);
            EIGEN_GEBP_ONESTEP(6);
            EIGEN_GEBP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4");
          }
          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, T0;
            LhsPacket A2;
            EIGEN_GEBP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 3*Traits::LhsProgress;
          }

#undef EIGEN_GEBP_ONESTEP

          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r1.loadPacket(0 * Traits::ResPacketSize);
          R1 = r1.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C1, alphav, R0);
          traits.acc(C5, alphav, R1);
          traits.acc(C9, alphav, R2);
          r1.storePacket(0 * Traits::ResPacketSize, R0);
          r1.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r2.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C2, alphav, R0);
          traits.acc(C6, alphav, R1);
          traits.acc(C10, alphav, R2);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r2.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r3.loadPacket(0 * Traits::ResPacketSize);
          R1 = r3.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C3, alphav, R0);
          traits.acc(C7, alphav, R1);
          traits.acc(C11, alphav, R2);
          r3.storePacket(0 * Traits::ResPacketSize, R0);
          r3.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(2 * Traits::ResPacketSize, R2);          
          }
        }

        // Deal with remaining columns of the rhs
        for(Index j2=packet_cols4; j2<cols; j2++)
        {
          for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
          {
          // One column at a time
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C4, C8;
          traits.initAcc(C0);
          traits.initAcc(C4);
          traits.initAcc(C8);

          LinearMapper r0 = res.getLinearMapper(i, j2);
          r0.prefetch(0);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1, A2;
          
          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
            RhsPacket B_0;
#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);   \
              traits.madd(A0, B_0, C0, B_0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A2, B_0, C8, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
            } while(false)
        
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
          }

          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);          
          }
        }
      }
    }

    //---------- Process 2 * LhsProgress rows at once ----------
    if(mr>=2*Traits::LhsProgress)
    {
      const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.
      // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size
      // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess),
      // or because we are testing specific blocking sizes.
      Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));

      for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)
      {
        Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);
        for(Index j2=0; j2<packet_cols4; j2+=nr)
LM's avatar
LM committed
1164
        {
1165
          for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
LM's avatar
LM committed
1166
          {
1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
          
          // We selected a 2*Traits::LhsProgress x nr micro block of res which is entirely
          // stored into 2 x nr registers.
          
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C1, C2, C3,
                    C4, C5, C6, C7;
          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);
          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(prefetch_res_offset);
          r1.prefetch(prefetch_res_offset);
          r2.prefetch(prefetch_res_offset);
          r3.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0, A1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
            RhsPacket B_0, B1, B2, B3, T0;

          // NOTE: the begin/end asm comments below work around bug 935!
          // but they are not enough for gcc>=6 without FMA (bug 1637)
          #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
            #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__  ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1));
          #else
            #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND
          #endif
          #define EIGEN_GEBGP_ONESTEP(K) \
            do {                                                                \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4");        \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                    \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                    \
              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \
              traits.madd(A0, B_0, C0, T0);                                     \
              traits.madd(A1, B_0, C4, B_0);                                    \
              traits.madd(A0, B1,  C1, T0);                                     \
              traits.madd(A1, B1,  C5, B1);                                     \
              traits.madd(A0, B2,  C2, T0);                                     \
              traits.madd(A1, B2,  C6, B2);                                     \
              traits.madd(A0, B3,  C3, T0);                                     \
              traits.madd(A1, B3,  C7, B3);                                     \
              EIGEN_GEBP_2PX4_SPILLING_WORKAROUND                               \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4");          \
            } while(false)
            
            internal::prefetch(blB+(48+0));
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            internal::prefetch(blB+(48+16));
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*(2*Traits::LhsProgress);

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4");
          }
          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1, B2, B3, T0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1, R2, R3;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(0 * Traits::ResPacketSize);
          R3 = r1.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C1, alphav, R2);
          traits.acc(C5, alphav, R3);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(0 * Traits::ResPacketSize, R2);
          r1.storePacket(1 * Traits::ResPacketSize, R3);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(0 * Traits::ResPacketSize);
          R3 = r3.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C2,  alphav, R0);
          traits.acc(C6,  alphav, R1);
          traits.acc(C3,  alphav, R2);
          traits.acc(C7,  alphav, R3);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(0 * Traits::ResPacketSize, R2);
          r3.storePacket(1 * Traits::ResPacketSize, R3);
          }
        }
      
        // Deal with remaining columns of the rhs
        for(Index j2=packet_cols4; j2<cols; j2++)
        {
          for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
          {
          // One column at a time
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C4;
          traits.initAcc(C0);
          traits.initAcc(C4);

          LinearMapper r0 = res.getLinearMapper(i, j2);
          r0.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1");
            RhsPacket B_0, B1;
        
#define EIGEN_GEBGP_ONESTEP(K) \
            do {                                                                  \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1");          \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                      \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                      \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                       \
              traits.madd(A0, B_0, C0, B1);                                       \
              traits.madd(A1, B_0, C4, B_0);                                      \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1");            \
            } while(false)
        
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*2*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
          }

          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0, R1;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          }
        }
      }
    }
    //---------- Process 1 * LhsProgress rows at once ----------
    if(mr>=1*Traits::LhsProgress)
    {
      // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)
      for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
      {
        // loops on each largest micro vertical panel of rhs (depth * nr)
        for(Index j2=0; j2<packet_cols4; j2+=nr)
        {
          // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
          // stored into 1 x nr registers.
          
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C1, C2, C3;
          traits.initAcc(C0);
          traits.initAcc(C1);
          traits.initAcc(C2);
          traits.initAcc(C3);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(prefetch_res_offset);
          r1.prefetch(prefetch_res_offset);
          r2.prefetch(prefetch_res_offset);
          r3.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
            RhsPacket B_0, B1, B2, B3;
               
#define EIGEN_GEBGP_ONESTEP(K) \
            do {                                                                \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4");        \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \
              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \
              traits.madd(A0, B_0, C0, B_0);                                    \
              traits.madd(A0, B1,  C1, B1);                                     \
              traits.madd(A0, B2,  C2, B2);                                     \
              traits.madd(A0, B3,  C3, B3);                                     \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4");          \
            } while(false)
            
            internal::prefetch(blB+(48+0));
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            internal::prefetch(blB+(48+16));
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*1*LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
          }
          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1, B2, B3;
            EIGEN_GEBGP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 1*LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r1.loadPacket(0 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C1,  alphav, R1);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r1.storePacket(0 * Traits::ResPacketSize, R1);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r3.loadPacket(0 * Traits::ResPacketSize);
          traits.acc(C2,  alphav, R0);
          traits.acc(C3,  alphav, R1);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r3.storePacket(0 * Traits::ResPacketSize, R1);
        }

        // Deal with remaining columns of the rhs
        for(Index j2=packet_cols4; j2<cols; j2++)
        {
          // One column at a time
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0;
          traits.initAcc(C0);

          LinearMapper r0 = res.getLinearMapper(i, j2);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
            RhsPacket B_0;
        
#define EIGEN_GEBGP_ONESTEP(K) \
            do {                                                                \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1");        \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                     \
              traits.madd(A0, B_0, C0, B_0);                                    \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1");          \
            } while(false)

            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*1*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
          }

          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 1*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0;
          ResPacket alphav = pset1<ResPacket>(alpha);
          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
        }
      }
    }
    //---------- Process remaining rows, 1 at once ----------
    if(peeled_mc1<rows)
    {
      // loop on each panel of the rhs
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        // loop on each row of the lhs (1*LhsProgress x depth)
        for(Index i=peeled_mc1; i<rows; i+=1)
        {
          const LhsScalar* blA = &blockA[i*strideA+offsetA];
          prefetch(&blA[0]);
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];

          // The following piece of code won't work for 512 bit registers
          // Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size
          // as nr (which is currently 4) for the return type.
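          // Sketch of the vectorized path below (descriptive note): the kernel is
          // "swapped", i.e. the rhs panel is streamed through the vector unit as if
          // it were the lhs (SwappedTraits/straits). For instance, for float with
          // 8-wide packets, SwappedTraits::LhsProgress==8 and the half packet holds
          // 4 values, which matches nr==4, so the condition above is satisfied.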
          const int SResPacketHalfSize = unpacket_traits<typename unpacket_traits<SResPacket>::half>::size;
          if ((SwappedTraits::LhsProgress % 4) == 0 &&
              (SwappedTraits::LhsProgress <= 8) &&
              (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr))
          {
            SAccPacket C0, C1, C2, C3;
            straits.initAcc(C0);
            straits.initAcc(C1);
            straits.initAcc(C2);
            straits.initAcc(C3);

            const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);
            const Index endk  = (depth/spk)*spk;
            const Index endk4 = (depth/(spk*4))*(spk*4);

            Index k=0;
            for(; k<endk4; k+=4*spk)
            {
              SLhsPacket A0,A1;
              SRhsPacket B_0,B_1;

              straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
              straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);

              straits.loadRhsQuad(blA+0*spk, B_0);
              straits.loadRhsQuad(blA+1*spk, B_1);
              straits.madd(A0,B_0,C0,B_0);
              straits.madd(A1,B_1,C1,B_1);

              straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
              straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
              straits.loadRhsQuad(blA+2*spk, B_0);
              straits.loadRhsQuad(blA+3*spk, B_1);
              straits.madd(A0,B_0,C2,B_0);
              straits.madd(A1,B_1,C3,B_1);

              blB += 4*SwappedTraits::LhsProgress;
              blA += 4*spk;
            }
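            // fold the four partial accumulators into C0 before the tail loop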
            C0 = padd(padd(C0,C1),padd(C2,C3));
            for(; k<endk; k+=spk)
            {
              SLhsPacket A0;
              SRhsPacket B_0;

              straits.loadLhsUnaligned(blB, A0);
              straits.loadRhsQuad(blA, B_0);
              straits.madd(A0,B_0,C0,B_0);

              blB += SwappedTraits::LhsProgress;
              blA += spk;
            }
            if(SwappedTraits::LhsProgress==8)
            {
              // Special case where we have to first reduce the accumulation register C0
              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SRhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;

              SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
              SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);

              if(depth-endk>0)
              {
                // We have to handle the last row of the rhs which corresponds to a half-packet
                SLhsPacketHalf a0;
                SRhsPacketHalf b0;
                straits.loadLhsUnaligned(blB, a0);
                straits.loadRhs(blA, b0);
                SAccPacketHalf c0 = predux_downto4(C0);
                straits.madd(a0,b0,c0,b0);
                straits.acc(c0, alphav, R);
              }
              else
              {
                straits.acc(predux_downto4(C0), alphav, R);
              }
              res.scatterPacket(i, j2, R);
            }
            else
            {
              SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
              SResPacket alphav = pset1<SResPacket>(alpha);
              straits.acc(C0, alphav, R);
              res.scatterPacket(i, j2, R);
            }
          }
          else // scalar path
          {
            // get a 1 x 4 res block as registers
            ResScalar C0(0), C1(0), C2(0), C3(0);

            for(Index k=0; k<depth; k++)
            {
              LhsScalar A0;
              RhsScalar B_0, B_1;

              A0 = blA[k];

              B_0 = blB[0];
              B_1 = blB[1];
              CJMADD(cj,A0,B_0,C0,  B_0);
              CJMADD(cj,A0,B_1,C1,  B_1);
              
              B_0 = blB[2];
              B_1 = blB[3];
              CJMADD(cj,A0,B_0,C2,  B_0);
              CJMADD(cj,A0,B_1,C3,  B_1);
              
              blB += 4;
            }
            res(i, j2 + 0) += alpha * C0;
            res(i, j2 + 1) += alpha * C1;
            res(i, j2 + 2) += alpha * C2;
            res(i, j2 + 3) += alpha * C3;
          }
        }
      }
      // remaining columns
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        // loop on each row of the lhs (1*LhsProgress x depth)
        for(Index i=peeled_mc1; i<rows; i+=1)
        {
          const LhsScalar* blA = &blockA[i*strideA+offsetA];
          prefetch(&blA[0]);
          // gets a 1 x 1 res block as registers
          ResScalar C0(0);
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          for(Index k=0; k<depth; k++)
          {
            LhsScalar A0 = blA[k];
            RhsScalar B_0 = blB[k];
            CJMADD(cj, A0, B_0, C0, B_0);
          }
          res(i, j2) += alpha * C0;
        }
      }
    }
  }


#undef CJMADD

// pack a block of the lhs
// The traversal is as follows (mr==4):
//   0  4  8 12 ...
//   1  5  9 13 ...
//   2  6 10 14 ...
//   3  7 11 15 ...
//
//  16 20 24 28 ...
//  17 21 25 29 ...
//  18 22 26 30 ...
//  19 23 27 31 ...
//
//  32 33 34 35 ...
//  36 37 38 39 ...
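//
// Illustrative example (not normative): with PacketSize==4 and Pack1==3*PacketSize,
// each k-slice of a 12-row panel is stored as three consecutive packets, i.e.
//   blockA = { A(0..3,0), A(4..7,0), A(8..11,0), A(0..3,1), ... }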
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};

template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

  const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
  const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
  const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
  const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
                         : Pack2>1             ? (rows/Pack2)*Pack2 : 0;
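  // Worked example (illustrative): with PacketSize==4, Pack1==12, Pack2==4 and
  // rows==30, we get peeled_mc3==24, peeled_mc2==24, peeled_mc1==28, peeled_mc0==28:
  // rows 0..23 take the 3-packet path, rows 24..27 the 1-packet path, and the last
  // two rows are packed one scalar at a time.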

  Index i=0;

  // Pack 3 packets
  if(Pack1>=3*PacketSize)
  {
    for(; i<peeled_mc3; i+=3*PacketSize)
    {
      if(PanelMode) count += (3*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B, C;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        C = lhs.loadPacket(i+2*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
      }
      if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 2 packets
  if(Pack1>=2*PacketSize)
  {
    for(; i<peeled_mc2; i+=2*PacketSize)
    {
      if(PanelMode) count += (2*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
      }
      if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 1 packets
  if(Pack1>=1*PacketSize)
  {
    for(; i<peeled_mc1; i+=1*PacketSize)
    {
      if(PanelMode) count += (1*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A;
        A = lhs.loadPacket(i+0*PacketSize, k);
        pstore(blockA+count, cj.pconj(A));
        count+=PacketSize;
      }
      if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack scalars
  if(Pack2<PacketSize && Pack2>1)
  {
    for(; i<peeled_mc0; i+=Pack2)
    {
      if(PanelMode) count += Pack2 * offset;

      for(Index k=0; k<depth; k++)
        for(Index w=0; w<Pack2; w++)
          blockA[count++] = cj(lhs(i+w, k));

      if(PanelMode) count += Pack2 * (stride-offset-depth);
    }
  }
  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}

template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};

template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

//   const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
//   const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
//   const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;

  int pack = Pack1;
  Index i = 0;
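  // Strategy note: rows are packed in strips of `pack` rows, starting at Pack1;
  // after each pass, pack shrinks by PacketSize (or snaps to Pack2) until every
  // strip width down to Pack2 has been handled.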
  while(pack>0)
  {
    Index remaining_rows = rows-i;
    Index peeled_mc = i+(remaining_rows/pack)*pack;
    for(; i<peeled_mc; i+=pack)
    {
      if(PanelMode) count += pack * offset;

      const Index peeled_k = (depth/PacketSize)*PacketSize;
      Index k=0;
      if(pack>=PacketSize)
      {
        for(; k<peeled_k; k+=PacketSize)
        {
          for (Index m = 0; m < pack; m += PacketSize)
          {
            PacketBlock<Packet> kernel;
            for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
            ptranspose(kernel);
            for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
          }
          count += PacketSize*pack;
        }
      }
      for(; k<depth; k++)
      {
        Index w=0;
        for(; w<pack-3; w+=4)
        {
          Scalar a(cj(lhs(i+w+0, k))),
                 b(cj(lhs(i+w+1, k))),
                 c(cj(lhs(i+w+2, k))),
                 d(cj(lhs(i+w+3, k)));
          blockA[count++] = a;
          blockA[count++] = b;
          blockA[count++] = c;
          blockA[count++] = d;
        }
        if(pack%4)
          for(;w<pack;++w)
            blockA[count++] = cj(lhs(i+w, k));
      }

      if(PanelMode) count += pack * (stride-offset-depth);
    }

    pack -= PacketSize;
    if(pack<Pack2 && (pack+PacketSize)!=Pack2)
      pack = Pack2;
  }

  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}

// copy a complete panel of the rhs
// this version is optimized for column major matrices
// The traversal order is as follows (nr==4):
//  0  1  2  3   12 13 14 15   24 27
//  4  5  6  7   16 17 18 19   25 28
//  8  9 10 11   20 21 22 23   26 29
//  .  .  .  .    .  .  .  .    .  .
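//
// Illustrative layout (nr==4): four columns are stored k-major, i.e.
//   blockB = { B(0,j2..j2+3), B(1,j2..j2+3), ..., B(depth-1,j2..j2+3), next 4 columns... }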
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};

template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;
  const Index peeled_k = (depth/PacketSize)*PacketSize;
//   if(nr>=8)
//   {
//     for(Index j2=0; j2<packet_cols8; j2+=8)
//     {
//       // skip what we have before
//       if(PanelMode) count += 8 * offset;
//       const Scalar* b0 = &rhs[(j2+0)*rhsStride];
//       const Scalar* b1 = &rhs[(j2+1)*rhsStride];
//       const Scalar* b2 = &rhs[(j2+2)*rhsStride];
//       const Scalar* b3 = &rhs[(j2+3)*rhsStride];
//       const Scalar* b4 = &rhs[(j2+4)*rhsStride];
//       const Scalar* b5 = &rhs[(j2+5)*rhsStride];
//       const Scalar* b6 = &rhs[(j2+6)*rhsStride];
//       const Scalar* b7 = &rhs[(j2+7)*rhsStride];
//       Index k=0;
//       if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
//       {
//         for(; k<peeled_k; k+=PacketSize) {
//           PacketBlock<Packet> kernel;
//           for (int p = 0; p < PacketSize; ++p) {
//             kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]);
//           }
//           ptranspose(kernel);
//           for (int p = 0; p < PacketSize; ++p) {
//             pstoreu(blockB+count, cj.pconj(kernel.packet[p]));
//             count+=PacketSize;
//           }
//         }
//       }
//       for(; k<depth; k++)
//       {
//         blockB[count+0] = cj(b0[k]);
//         blockB[count+1] = cj(b1[k]);
//         blockB[count+2] = cj(b2[k]);
//         blockB[count+3] = cj(b3[k]);
//         blockB[count+4] = cj(b4[k]);
//         blockB[count+5] = cj(b5[k]);
//         blockB[count+6] = cj(b6[k]);
//         blockB[count+7] = cj(b7[k]);
//         count += 8;
//       }
//       // skip what we have after
//       if(PanelMode) count += 8 * (stride-offset-depth);
//     }
//   }

  if(nr>=4)
  {
    for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
    {
      // skip what we have before
      if(PanelMode) count += 4 * offset;
      const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
      const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
      const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
      const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

      Index k=0;
      if((PacketSize%4)==0) // TODO enable vectorized transposition for PacketSize==2 ??
      {
        for(; k<peeled_k; k+=PacketSize) {
          PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
          kernel.packet[0] = dm0.loadPacket(k);
          kernel.packet[1%PacketSize] = dm1.loadPacket(k);
          kernel.packet[2%PacketSize] = dm2.loadPacket(k);
          kernel.packet[3%PacketSize] = dm3.loadPacket(k);
          ptranspose(kernel);
          pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
          pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
          pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));
          pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));
          count+=4*PacketSize;
        }
      }
      for(; k<depth; k++)
      {
        blockB[count+0] = cj(dm0(k));
        blockB[count+1] = cj(dm1(k));
        blockB[count+2] = cj(dm2(k));
        blockB[count+3] = cj(dm3(k));
        count += 4;
      }
      // skip what we have after
      if(PanelMode) count += 4 * (stride-offset-depth);
    }
  }

  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    const LinearMapper dm0 = rhs.getLinearMapper(0, j2);
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(dm0(k));
      count += 1;
    }
    if(PanelMode) count += (stride-offset-depth);
  }
}

// this version is optimized for row major matrices
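// (With nr==4 and PacketSize==4, a 1x4 row of the rhs is contiguous in memory and
// is packed with a single packet load; see rhs.loadPacket(k, j2) below.)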
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};

template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;

//   if(nr>=8)
//   {
//     for(Index j2=0; j2<packet_cols8; j2+=8)
//     {
//       // skip what we have before
//       if(PanelMode) count += 8 * offset;
//       for(Index k=0; k<depth; k++)
//       {
//         if (PacketSize==8) {
//           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
//           pstoreu(blockB+count, cj.pconj(A));
//         } else if (PacketSize==4) {
//           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
//           Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
//           pstoreu(blockB+count, cj.pconj(A));
//           pstoreu(blockB+count+PacketSize, cj.pconj(B));
//         } else {
//           const Scalar* b0 = &rhs[k*rhsStride + j2];
//           blockB[count+0] = cj(b0[0]);
//           blockB[count+1] = cj(b0[1]);
//           blockB[count+2] = cj(b0[2]);
//           blockB[count+3] = cj(b0[3]);
//           blockB[count+4] = cj(b0[4]);
//           blockB[count+5] = cj(b0[5]);
//           blockB[count+6] = cj(b0[6]);
//           blockB[count+7] = cj(b0[7]);
//         }
//         count += 8;
//       }
//       // skip what we have after
//       if(PanelMode) count += 8 * (stride-offset-depth);
//     }
//   }
  if(nr>=4)
  {
    for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
    {
      // skip what we have before
      if(PanelMode) count += 4 * offset;
      for(Index k=0; k<depth; k++)
      {
        if (PacketSize==4) {
          Packet A = rhs.loadPacket(k, j2);
          pstoreu(blockB+count, cj.pconj(A));
          count += PacketSize;
        } else {
          const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
          blockB[count+0] = cj(dm0(0));
          blockB[count+1] = cj(dm0(1));
          blockB[count+2] = cj(dm0(2));
          blockB[count+3] = cj(dm0(3));
          count += 4;
        }
      }
      // skip what we have after
      if(PanelMode) count += 4 * (stride-offset-depth);
    }
  }
  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(rhs(k, j2));
      count += 1;
    }
    if(PanelMode) count += stride-offset-depth;
  }
}

} // end namespace internal

/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l1CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l1;
}

/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l2CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l2;
}

/** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l3CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l3;
}

/** Set the cpu L1, L2, and L3 cache sizes (in bytes).
  * These values are used to adjust the size of the blocks
  * for the algorithms working per block.
  *
  * \sa computeProductBlockingSizes */
inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
{
  internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
}
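// Usage sketch (illustrative):
//   std::ptrdiff_t l1 = Eigen::l1CacheSize();                // query the current L1 setting
//   Eigen::setCpuCacheSizes(32*1024, 256*1024, 8*1024*1024); // override L1/L2/L3 (in bytes)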

} // end namespace Eigen

#endif // EIGEN_GENERAL_BLOCK_PANEL_H