"Fossies" - the Fresh Open Source Software Archive

Member "muscle/util/ByteBuffer.cpp" (28 Nov 2019, 23736 Bytes) of package /linux/privat/muscle7.52.zip:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code-folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "ByteBuffer.cpp" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code-changes report: 7.41_vs_7.50.

    1 #include "dataio/SeekableDataIO.h"
    2 #include "util/ByteBuffer.h"
    3 #include "util/MiscUtilityFunctions.h"
    4 #include "system/GlobalMemoryAllocator.h"
    5 
    6 namespace muscle {
    7 
    8 void ByteBuffer :: AdoptBuffer(uint32 numBytes, uint8 * optBuffer)
    9 {
   10    Clear(true);  // free any previously held array
   11    _buffer = optBuffer;
   12    _numValidBytes = _numAllocatedBytes = numBytes;
   13 }
   14 
/** Replaces our contents with (numBytes) bytes copied from (buffer).
  * Handles the tricky aliasing case where (buffer) points into our own current
  * allocation (i.e. the caller wants us to become a subset of our former self),
  * in which case no reallocation may happen before the bytes are moved.
  * Returns B_NO_ERROR on success, or an error code on failure.
  */
status_t ByteBuffer :: SetBuffer(uint32 numBytes, const uint8 * buffer)
{
   if (IsByteInLocalBuffer(buffer))
   {
      // Special logic for handling it when the caller wants our bytes-array to become a subset of its former self.
      const uint32 numReadableBytes = (uint32)((_buffer+_numValidBytes)-buffer);  // bytes available from (buffer) to the end of our valid region
      if (numBytes > numReadableBytes)
      {
         LogTime(MUSCLE_LOG_CRITICALERROR, "ByteBuffer::SetBuffer();  Attempted to read " UINT32_FORMAT_SPEC " bytes off the end of our internal buffer!\n", numBytes-numReadableBytes);
         return B_BAD_ARGUMENT;
      }
      else
      {
         if (buffer > _buffer) memmove(_buffer, buffer, numBytes);  // memmove, because source and destination may overlap
         return SetNumBytes(numBytes, true);                        // true: must retain the bytes we just moved into place
      }
   }
   else
   {
      Clear(numBytes<(_numAllocatedBytes/2));  // FogBugz #6933: if the new buffer takes up less than half of our current space, toss it

      status_t ret;
      if (SetNumBytes(numBytes, false).IsError(ret)) return ret;
      if ((buffer)&&(_buffer)) memcpy(_buffer, buffer, numBytes);  // (buffer) may be NULL, meaning "just allocate, don't copy"
      return B_NO_ERROR;
   }
}
   42 
/** Sets our valid-byte count to (newNumBytes), growing the underlying allocation if necessary.
  * If (retainData) is true, existing contents are preserved across any reallocation; otherwise
  * the old contents may be discarded (which lets us use a plain malloc instead of a realloc).
  * Shrinking never releases memory (see FreeExtraBytes() for that).
  * Returns B_NO_ERROR on success, or B_OUT_OF_MEMORY on allocation failure.
  */
status_t ByteBuffer :: SetNumBytes(uint32 newNumBytes, bool retainData)
{
   TCHECKPOINT;

   if (newNumBytes > _numAllocatedBytes)
   {
      IMemoryAllocationStrategy * as = GetMemoryAllocationStrategy();
      if (retainData)
      {
         // Realloc so that the existing bytes survive the resize
         uint8 * newBuf = (uint8 *) (as ? as->Realloc(_buffer, newNumBytes, _numAllocatedBytes, true) : muscleRealloc(_buffer, newNumBytes));
         if (newBuf)
         {
            _buffer = newBuf;
            _numAllocatedBytes = _numValidBytes = newNumBytes;
         }
         else RETURN_OUT_OF_MEMORY;
      }
      else
      {
         // Old contents need not survive:  allocate a fresh array first, then free the old one
         uint8 * newBuf = NULL;
         if (newNumBytes > 0)
         {
            newBuf = (uint8 *) (as ? as->Malloc(newNumBytes) : muscleAlloc(newNumBytes));
            if (newBuf == NULL) RETURN_OUT_OF_MEMORY;  // note:  old buffer is still intact in this case
         }
         if (as) as->Free(_buffer, _numAllocatedBytes); else muscleFree(_buffer);
         _buffer = newBuf;
         _numAllocatedBytes = _numValidBytes = newNumBytes;
      }
   }
   else _numValidBytes = newNumBytes;  // truncating our array is easy!

   return B_NO_ERROR;
}
   77 
/** Appends (numBytes) bytes from (bytes) to the end of our valid region, growing the buffer
  * if necessary.  If (bytes) is NULL, the buffer is still grown but the new bytes are left
  * uninitialized.  If (allocExtra) is true, slack space may be allocated to amortize future
  * appends.  Correctly handles the case where (bytes) points into our own array.
  */
status_t ByteBuffer :: AppendBytes(const uint8 * bytes, uint32 numBytes, bool allocExtra)
{
   if (numBytes == 0) return B_NO_ERROR;

   if ((bytes)&&(IsByteInLocalBuffer(bytes))&&((_numValidBytes+numBytes)>_numAllocatedBytes))
   {
      // Oh dear, caller wants us to add a copy of some of our own bytes to ourself, AND we'll need to perform a reallocation to do it!
      // So to avoid freeing (bytes) before we read from them, we're going to copy them over to a temporary buffer first.
      uint8 * tmpBuf = newnothrow uint8[numBytes];
      if (tmpBuf) memcpy(tmpBuf, bytes, numBytes);
             else RETURN_OUT_OF_MEMORY;
      const status_t ret = AppendBytes(tmpBuf, numBytes, allocExtra);  // recurse:  (tmpBuf) no longer aliases our array
      delete [] tmpBuf;
      return ret;
   }

   const uint32 oldValidBytes = _numValidBytes;  // save this value since SetNumBytes() will change it
   status_t ret;
   if (SetNumBytesWithExtraSpace(_numValidBytes+numBytes, allocExtra).IsError(ret)) return ret;
   if (bytes != NULL) memcpy(_buffer+oldValidBytes, bytes, numBytes);
   return B_NO_ERROR;
}
  100 
  101 status_t ByteBuffer :: SetNumBytesWithExtraSpace(uint32 newNumValidBytes, bool allocExtra)
  102 {
  103    status_t ret;
  104    if (SetNumBytes(((allocExtra)&&(newNumValidBytes > _numAllocatedBytes)) ? muscleMax(newNumValidBytes*4, (uint32)128) : newNumValidBytes, true).IsOK(ret))
  105    {
  106       _numValidBytes = newNumValidBytes;
  107       return B_NO_ERROR;
  108    }
  109    else return ret;
  110 }
  111 
/** Shrinks our allocation down to exactly (_numValidBytes), releasing any slack space.
  * No-op if there is no slack.  Returns B_NO_ERROR on success, or B_OUT_OF_MEMORY if the
  * shrinking realloc unexpectedly failed.
  */
status_t ByteBuffer :: FreeExtraBytes()
{
   TCHECKPOINT;

   if (_numValidBytes < _numAllocatedBytes)
   {
      IMemoryAllocationStrategy * as = GetMemoryAllocationStrategy();
      uint8 * newBuf = (uint8 *) (as ? as->Realloc(_buffer, _numValidBytes, _numAllocatedBytes, true) : muscleRealloc(_buffer, _numValidBytes));
      if ((_numValidBytes == 0)||(newBuf))   // a NULL result is acceptable when shrinking to zero bytes
      {
         _buffer            = newBuf;
         _numAllocatedBytes = _numValidBytes;
      }
      else RETURN_OUT_OF_MEMORY;
   }
   return B_NO_ERROR;
}
  129 
  130 /** Overridden to set our buffer directly from (copyFrom)'s Flatten() method */
  131 status_t ByteBuffer :: CopyFromImplementation(const Flattenable & copyFrom)
  132 {
  133    const uint32 numBytes = copyFrom.FlattenedSize();
  134    status_t ret;
  135    if (SetNumBytes(numBytes, false).IsError(ret)) return ret;
  136    copyFrom.Flatten(_buffer);
  137    return B_NO_ERROR;
  138 }
  139 
  140 void ByteBuffer :: Clear(bool releaseBuffers)
  141 {
  142    if (releaseBuffers)
  143    {
  144       IMemoryAllocationStrategy * as = GetMemoryAllocationStrategy();
  145       if (as) as->Free(_buffer, _numAllocatedBytes); else muscleFree(_buffer);
  146       _buffer = NULL;
  147       _numValidBytes = _numAllocatedBytes = 0;
  148    }
  149    else SetNumBytes(0, false);
  150 }
  151 
  152 void ByteBuffer :: PrintToStream(uint32 maxBytesToPrint, uint32 numColumns, FILE * optFile) const
  153 {
  154    PrintHexBytes(GetBuffer(), muscleMin(maxBytesToPrint, GetNumBytes()), "ByteBuffer", numColumns, optFile);
  155 }
  156 
  157 ByteBuffer operator+(const ByteBuffer & lhs, const ByteBuffer & rhs)
  158 {
  159    ByteBuffer ret;
  160    if (ret.SetNumBytes(lhs.GetNumBytes()+rhs.GetNumBytes(), false) == B_NO_ERROR)
  161    {
  162       memcpy(ret.GetBuffer(), lhs.GetBuffer(), lhs.GetNumBytes());
  163       memcpy(ret.GetBuffer()+lhs.GetNumBytes(), rhs.GetBuffer(), rhs.GetNumBytes());
  164    }
  165    return ret;
  166 }
  167 
// Process-wide ObjectPool used to recycle ByteBuffer objects
static ByteBufferRef::ItemPool _bufferPool;
ByteBufferRef::ItemPool * GetByteBufferPool() {return &_bufferPool;}
const ByteBuffer & GetEmptyByteBuffer() {return _bufferPool.GetDefaultObject();}

// Shared read-only reference to the pool's default (empty) ByteBuffer.
// The (false) argument disables refcount-based deletion, since the default object is never freed.
static const ConstByteBufferRef _emptyBufRef(&_bufferPool.GetDefaultObject(), false);
ConstByteBufferRef GetEmptyByteBufferRef() {return _emptyBufRef;}
/** Obtains a ByteBuffer from the default pool and fills it with (numBytes) bytes from (optBuffer). */
ByteBufferRef GetByteBufferFromPool(uint32 numBytes, const uint8 * optBuffer) {return GetByteBufferFromPool(_bufferPool, numBytes, optBuffer);}

/** Same as above, but obtains the ByteBuffer from the caller-specified pool instead. */
ByteBufferRef GetByteBufferFromPool(ObjectPool<ByteBuffer> & pool, uint32 numBytes, const uint8 * optBuffer)
{
   ByteBufferRef ref(pool.ObtainObject());
   if ((ref())&&(ref()->SetBuffer(numBytes, optBuffer) != B_NO_ERROR)) ref.Reset();  // return NULL ref on out-of-memory
   return ref;
}
  182 
/** Reads the remainder of (dio) (from its current read-position to its end) into a ByteBuffer
  * obtained from the default pool. */
ByteBufferRef GetByteBufferFromPool(SeekableDataIO & dio) {return GetByteBufferFromPool(_bufferPool, dio);}

/** Same as above, but the ByteBuffer comes from the caller-specified pool.  Returns a NULL
  * reference if the stream-length is unknown, implausible, or on out-of-memory.
  */
ByteBufferRef GetByteBufferFromPool(ObjectPool<ByteBuffer> & pool, SeekableDataIO & dio)
{
   const int64 dioLen = dio.GetLength();
   if (dioLen < 0) return ByteBufferRef();  // we don't support reading in unknown lengths of data (for now)

   const int64 pos = muscleMax(dio.GetPosition(), (int64) 0);  // clamp in case GetPosition() reports an error as negative

   const int64 numBytesToRead = dioLen-pos;
   if (numBytesToRead < 0) return ByteBufferRef();  // wtf?

   const int64 maxBBSize = (int64) ((uint32)-1);  // no point trying to read more data than a ByteBuffer will support anyway
   if (numBytesToRead > maxBBSize) return ByteBufferRef();

   ByteBufferRef ret = GetByteBufferFromPool(pool, (uint32)numBytesToRead);
   if (ret() == NULL) return ByteBufferRef();

   // This will truncate the ByteBuffer if we end up reading fewer bytes than we expected to
   ret()->SetNumBytes(dio.ReadFully(ret()->GetBuffer(), ret()->GetNumBytes()), true);
   return ret;
}
  205 
  206 // These Flattenable methods are implemented here so that if you don't use them, you
  207 // don't need to include ByteBuffer.o in your Makefile.  If you do use them, then you
  208 // needed to include ByteBuffer.o in your Makefile anyway.
  209 
  210 Ref<ByteBuffer> Flattenable :: FlattenToByteBuffer() const
  211 {
  212    ByteBufferRef bufRef = GetByteBufferFromPool(FlattenedSize());
  213    if (bufRef()) Flatten(bufRef()->GetBuffer());
  214    return bufRef;
  215 }
  216 
  217 status_t Flattenable :: FlattenToByteBuffer(ByteBuffer & outBuf) const
  218 {
  219    status_t ret;
  220    if (outBuf.SetNumBytes(FlattenedSize(), false).IsError(ret)) return ret;
  221    Flatten(outBuf.GetBuffer());
  222    return B_NO_ERROR;
  223 }
  224 
  225 status_t Flattenable :: UnflattenFromByteBuffer(const ByteBuffer & buf) 
  226 {
  227    return Unflatten(buf.GetBuffer(), buf.GetNumBytes());
  228 }
  229 
  230 status_t Flattenable :: UnflattenFromByteBuffer(const ConstRef<ByteBuffer> & buf)
  231 {
  232    return buf() ? Unflatten(buf()->GetBuffer(), buf()->GetNumBytes()) : B_BAD_ARGUMENT;
  233 }
  234 
  235 uint32 ByteBuffer :: ReadInt8s(int8 * vals, uint32 numValsToRead, uint32 & readByteOffset) const
  236 {
  237    const uint8 * readAt = _buffer+readByteOffset;
  238    numValsToRead = muscleMin(numValsToRead, GetNumValidBytesAtOffset(readByteOffset));
  239    memcpy(vals, readAt, numValsToRead);
  240    readByteOffset += numValsToRead;
  241    return numValsToRead;
  242 }
  243 
/** Reads up to (numValsToRead) int16s from our buffer starting at (readByteOffset),
  * byte-swapping each value if endian-swap mode is enabled.  Advances (readByteOffset)
  * past the consumed bytes and returns the number of values actually read (may be fewer
  * than requested if we run out of valid bytes).
  */
uint32 ByteBuffer :: ReadInt16s(int16 * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   const uint8 * readAt = _buffer+readByteOffset;
   numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/sizeof(int16)));  // whole values only
   const uint32 numBytesToRead = numValsToRead*sizeof(int16);
   if (IsEndianSwapEnabled())
   {
      // muscleCopyIn() avoids potential unaligned-access problems
      for (uint32 i=0; i<numValsToRead; i++) vals[i] = B_SWAP_INT16(muscleCopyIn<int16>(&readAt[i*sizeof(int16)]));
   }
   else memcpy(vals, readAt, numBytesToRead);

   readByteOffset += numBytesToRead;
   return numValsToRead;
}
  258 
/** Reads up to (numValsToRead) int32s from our buffer starting at (readByteOffset),
  * byte-swapping each value if endian-swap mode is enabled.  Advances (readByteOffset)
  * and returns the number of values actually read.
  */
uint32 ByteBuffer :: ReadInt32s(int32 * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   const uint8 * readAt = _buffer+readByteOffset;
   numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/sizeof(int32)));  // whole values only
   const uint32 numBytesToRead = numValsToRead*sizeof(int32);
   if (IsEndianSwapEnabled())
   {
      // muscleCopyIn() avoids potential unaligned-access problems
      for (uint32 i=0; i<numValsToRead; i++) vals[i] = B_SWAP_INT32(muscleCopyIn<int32>(&readAt[i*sizeof(int32)]));
   }
   else memcpy(vals, readAt, numBytesToRead);

   readByteOffset += numBytesToRead;
   return numValsToRead;
}
  273 
  274 uint32 ByteBuffer :: ReadInt64s(int64 * vals, uint32 numValsToRead, uint32 & readByteOffset) const
  275 {
  276    const uint8 * readAt = _buffer+readByteOffset;
  277    numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/sizeof(int64)));
  278    const uint32 numBytesToRead = numValsToRead*sizeof(int64);
  279    if (IsEndianSwapEnabled())
  280    {
  281       for (uint64 i=0; i<numValsToRead; i++) vals[i] = B_SWAP_INT64(muscleCopyIn<int64>(&readAt[i*sizeof(int64)]));
  282    }
  283    else memcpy(vals, readAt, numBytesToRead);
  284 
  285    readByteOffset += numBytesToRead;
  286    return numValsToRead;
  287 }
  288 
/** Reads up to (numValsToRead) 32-bit floats from our buffer starting at (readByteOffset).
  * When endian-swap mode is enabled, each value is converted from the non-host byte order
  * via the IFLOAT macros (which transport floats as int32 bit-patterns).  Advances
  * (readByteOffset) and returns the number of values actually read.
  */
uint32 ByteBuffer :: ReadFloats(float * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   const uint8 * readAt = _buffer+readByteOffset;
   numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/sizeof(int32)));  // whole values only
   const uint32 numBytesToRead = numValsToRead*sizeof(int32);
   if (IsEndianSwapEnabled())
   {
#if B_HOST_IS_BENDIAN
      for (uint32 i=0; i<numValsToRead; i++) vals[i] = B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&readAt[i*sizeof(int32)]));
#else
      for (uint32 i=0; i<numValsToRead; i++) vals[i] = B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&readAt[i*sizeof(int32)]));
#endif
   }
   else memcpy(vals, readAt, numBytesToRead);

   readByteOffset += numBytesToRead;
   return numValsToRead;
}
  307 
/** Reads up to (numValsToRead) 64-bit doubles from our buffer starting at (readByteOffset).
  * When endian-swap mode is enabled, each value is converted from the non-host byte order
  * via the IDOUBLE macros (which transport doubles as int64 bit-patterns).  Advances
  * (readByteOffset) and returns the number of values actually read.
  */
uint32 ByteBuffer :: ReadDoubles(double * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   const uint8 * readAt = _buffer+readByteOffset;
   numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/sizeof(int64)));  // whole values only
   const uint32 numBytesToRead = numValsToRead*sizeof(int64);
   if (IsEndianSwapEnabled())
   {
#if B_HOST_IS_BENDIAN
      for (uint32 i=0; i<numValsToRead; i++) vals[i] = B_LENDIAN_TO_HOST_IDOUBLE(muscleCopyIn<int64>(&readAt[i*sizeof(int64)]));
#else
      for (uint32 i=0; i<numValsToRead; i++) vals[i] = B_BENDIAN_TO_HOST_IDOUBLE(muscleCopyIn<int64>(&readAt[i*sizeof(int64)]));
#endif
   }
   else memcpy(vals, readAt, numBytesToRead);

   readByteOffset += numBytesToRead;
   return numValsToRead;
}
  326 
/** Reads up to (numValsToRead) Points from our buffer starting at (readByteOffset).
  * Each Point is stored as two consecutive 32-bit floats (x, y); endian conversion is
  * applied when endian-swap mode is enabled.  Advances (readByteOffset) and returns the
  * number of Points actually read.
  */
uint32 ByteBuffer :: ReadPoints(Point * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   const uint32 bytesPerPoint = sizeof(int32)*2;  // a Point flattens to two 32-bit floats
   const uint8 * readAt = _buffer+readByteOffset;
   numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/bytesPerPoint));  // whole Points only
   const uint32 numBytesToRead = numValsToRead*bytesPerPoint;
   if (IsEndianSwapEnabled())
   {
      for (uint32 i=0; i<numValsToRead; i++)
      {
         const uint8 * rBase = &readAt[i*bytesPerPoint];
#if B_HOST_IS_BENDIAN
         vals[i].Set(B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[0*sizeof(int32)])), B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[1*sizeof(int32)])));
#else
         vals[i].Set(B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[0*sizeof(int32)])), B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[1*sizeof(int32)])));
#endif
      }
   }
   else 
   {
      for (uint32 i=0; i<numValsToRead; i++)
      {
         const uint8 * rBase = &readAt[i*bytesPerPoint];
         vals[i].Set(muscleCopyIn<float>(&rBase[0*sizeof(int32)]), muscleCopyIn<float>(&rBase[1*sizeof(int32)]));
      }
   }

   readByteOffset += numBytesToRead;
   return numValsToRead;
}
  357 
/** Reads up to (numValsToRead) Rects from our buffer starting at (readByteOffset).
  * Each Rect is stored as four consecutive 32-bit floats; endian conversion is applied
  * when endian-swap mode is enabled.  Advances (readByteOffset) and returns the number
  * of Rects actually read.
  */
uint32 ByteBuffer :: ReadRects(Rect * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   const uint32 bytesPerRect = sizeof(int32)*4;  // a Rect flattens to four 32-bit floats
   const uint8 * readAt = _buffer+readByteOffset;
   numValsToRead = muscleMin(numValsToRead, (uint32) (GetNumValidBytesAtOffset(readByteOffset)/bytesPerRect));  // whole Rects only
   const uint32 numBytesToRead = numValsToRead*bytesPerRect;
   if (IsEndianSwapEnabled())
   {
      for (uint32 i=0; i<numValsToRead; i++)
      {
         const uint8 * rBase = &readAt[i*bytesPerRect];
#if B_HOST_IS_BENDIAN
         vals[i].Set(B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[0*sizeof(int32)])), 
                     B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[1*sizeof(int32)])),
                     B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[2*sizeof(int32)])),
                     B_LENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[3*sizeof(int32)])));
#else
         vals[i].Set(B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[0*sizeof(int32)])), 
                     B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[1*sizeof(int32)])),
                     B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[2*sizeof(int32)])),
                     B_BENDIAN_TO_HOST_IFLOAT(muscleCopyIn<int32>(&rBase[3*sizeof(int32)])));
#endif
      }
   }
   else 
   {
      for (uint32 i=0; i<numValsToRead; i++)
      {
         const uint8 * rBase = &readAt[i*bytesPerRect];
         vals[i].Set(muscleCopyIn<float>(&rBase[0*sizeof(int32)]), 
                     muscleCopyIn<float>(&rBase[1*sizeof(int32)]),
                     muscleCopyIn<float>(&rBase[2*sizeof(int32)]),
                     muscleCopyIn<float>(&rBase[3*sizeof(int32)]));
      }
   }

   readByteOffset += numBytesToRead;
   return numValsToRead;
}
  397 
/** Unflattens (flat) from our bytes at (readByteOffset); on success advances (readByteOffset)
  * by (flat)'s FlattenedSize().  (optMaxReadSize) limits how many bytes (flat) is allowed to
  * consume; the limit is also clamped to our remaining valid bytes.
  */
status_t ByteBuffer :: ReadFlat(Flattenable & flat, uint32 & readByteOffset, uint32 optMaxReadSize) const
{
   if (&flat == this) return B_BAD_ARGUMENT;  // don't get cute

   status_t ret;
   if (flat.Unflatten(&_buffer[readByteOffset], muscleMin(optMaxReadSize, (readByteOffset < _numValidBytes) ? (_numValidBytes-readByteOffset) : 0)).IsError(ret)) return ret;
   readByteOffset += flat.FlattenedSize();  // NOTE(review): assumes FlattenedSize() equals the bytes Unflatten() consumed — confirm for variable-size types
   return B_NO_ERROR;
}
  407 
/** Reads up to (numValsToRead) NUL-terminated strings from our buffer starting at
  * (readByteOffset), advancing it past each string and its terminator.  Returns the
  * number of strings successfully read (may be fewer than requested).
  */
uint32 ByteBuffer :: ReadStrings(String * vals, uint32 numValsToRead, uint32 & readByteOffset) const
{
   for (uint32 i=0; i<numValsToRead; i++)
   {
      const uint32 numBytesAvailable = GetNumValidBytesAtOffset(readByteOffset);
      if ((numBytesAvailable == 0)||(vals[i].SetCstr((const char *)(_buffer+readByteOffset), numBytesAvailable) != B_NO_ERROR)) return i;
      readByteOffset = muscleMin(readByteOffset+vals[i].Length()+1, _numValidBytes);  // +1 skips the NUL terminator; clamp in case it was missing
   }
   return numValsToRead;
}
  418 
  419 status_t ByteBuffer :: WriteInt8s(const int8 * vals, uint32 numVals, uint32 & writeByteOffset)
  420 {
  421    const uint32 newByteSize = muscleMax(_numValidBytes, writeByteOffset+numVals);
  422    if (newByteSize > _numValidBytes)
  423    {
  424       status_t ret;
  425       if (SetNumBytesWithExtraSpace(newByteSize, true).IsError(ret)) return ret;
  426    }
  427 
  428    uint8 * writeTo = _buffer+writeByteOffset;
  429    memcpy(writeTo, vals, numVals);
  430    writeByteOffset += numVals;
  431    return B_NO_ERROR;
  432 }
  433 
/** Writes (numVals) int16s into our buffer at (writeByteOffset), growing the buffer if needed
  * and byte-swapping each value when endian-swap mode is enabled.  Advances (writeByteOffset)
  * past the written bytes.
  */
status_t ByteBuffer :: WriteInt16s(const int16 * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 numBytes     = numVals*sizeof(int16);
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;
   if (IsEndianSwapEnabled())
   {
       // muscleCopyOut() avoids potential unaligned-access problems
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int16)), B_SWAP_INT16(vals[i]));
   }
   else memcpy(writeTo, vals, numBytes);

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  454 
/** Writes (numVals) int32s into our buffer at (writeByteOffset), growing the buffer if needed
  * and byte-swapping each value when endian-swap mode is enabled.  Advances (writeByteOffset)
  * past the written bytes.
  */
status_t ByteBuffer :: WriteInt32s(const int32 * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 numBytes     = numVals*sizeof(int32);
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;
   if (IsEndianSwapEnabled())
   {
       // muscleCopyOut() avoids potential unaligned-access problems
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int32)), B_SWAP_INT32(vals[i]));
   }
   else memcpy(writeTo, vals, numBytes);

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  475 
/** Writes (numVals) int64s into our buffer at (writeByteOffset), growing the buffer if needed
  * and byte-swapping each value when endian-swap mode is enabled.  Advances (writeByteOffset)
  * past the written bytes.
  */
status_t ByteBuffer :: WriteInt64s(const int64 * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 numBytes     = numVals*sizeof(int64);
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;
   if (IsEndianSwapEnabled())
   {
       // muscleCopyOut() avoids potential unaligned-access problems
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int64)), B_SWAP_INT64(vals[i]));
   }
   else memcpy(writeTo, vals, numBytes);

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  496 
/** Writes (numVals) 32-bit floats into our buffer at (writeByteOffset), growing the buffer if
  * needed.  When endian-swap mode is enabled, each value is converted to the non-host byte
  * order via the IFLOAT macros (floats travel as int32 bit-patterns).  Advances
  * (writeByteOffset) past the written bytes.
  */
status_t ByteBuffer :: WriteFloats(const float * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 numBytes     = numVals*sizeof(int32);
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;
   if (IsEndianSwapEnabled())
   {
#if B_HOST_IS_BENDIAN
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int32)), B_HOST_TO_LENDIAN_IFLOAT(vals[i]));
#else
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int32)), B_HOST_TO_BENDIAN_IFLOAT(vals[i]));
#endif
   }
   else memcpy(writeTo, vals, numBytes);

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  521 
/** Writes (numVals) 64-bit doubles into our buffer at (writeByteOffset), growing the buffer if
  * needed.  When endian-swap mode is enabled, each value is converted to the non-host byte
  * order via the IDOUBLE macros (doubles travel as int64 bit-patterns).  Advances
  * (writeByteOffset) past the written bytes.
  */
status_t ByteBuffer :: WriteDoubles(const double * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 numBytes     = numVals*sizeof(int64);
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;
   if (IsEndianSwapEnabled())
   {
#if B_HOST_IS_BENDIAN
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int64)), B_HOST_TO_LENDIAN_IDOUBLE(vals[i]));
#else
       for (uint32 i=0; i<numVals; i++) muscleCopyOut(writeTo+(i*sizeof(int64)), B_HOST_TO_BENDIAN_IDOUBLE(vals[i]));
#endif
   }
   else memcpy(writeTo, vals, numBytes);

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  546 
/** Writes (numVals) Points into our buffer at (writeByteOffset) as pairs of 32-bit floats,
  * growing the buffer if needed and endian-converting when endian-swap mode is enabled.
  * Advances (writeByteOffset) past the written bytes.
  */
status_t ByteBuffer :: WritePoints(const Point * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 bytesPerPoint = sizeof(int32)*2;  // a Point flattens to two 32-bit floats (x, y)
   const uint32 numBytes     = numVals*bytesPerPoint;
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;

   if (IsEndianSwapEnabled())
   {
      for (uint32 i=0; i<numVals; i++)
      {
         uint8 * wBase = &writeTo[i*bytesPerPoint];
#if B_HOST_IS_BENDIAN
         for (uint32 j=0; j<2; j++) muscleCopyOut(wBase+(j*sizeof(int32)), B_HOST_TO_LENDIAN_IFLOAT(vals[i][j]));
#else
         for (uint32 j=0; j<2; j++) muscleCopyOut(wBase+(j*sizeof(int32)), B_HOST_TO_BENDIAN_IFLOAT(vals[i][j]));
#endif
      }
   }
   else 
   {
      for (uint32 i=0; i<numVals; i++)
      {
         uint8 * wBase = &writeTo[i*bytesPerPoint];
         for (uint32 j=0; j<2; j++) muscleCopyOut(wBase+(j*sizeof(int32)), vals[i][j]);
      }
   }

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  584 
/** Writes (numVals) Rects into our buffer at (writeByteOffset) as quadruples of 32-bit floats,
  * growing the buffer if needed and endian-converting when endian-swap mode is enabled.
  * Advances (writeByteOffset) past the written bytes.
  */
status_t ByteBuffer :: WriteRects(const Rect * vals, uint32 numVals, uint32 & writeByteOffset)
{
   const uint32 bytesPerRect = sizeof(int32)*4;  // a Rect flattens to four 32-bit floats
   const uint32 numBytes     = numVals*bytesPerRect;
   const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
   if (newValidSize > _numValidBytes)
   {
      status_t ret;
      if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
   }

   uint8 * writeTo = _buffer+writeByteOffset;

   if (IsEndianSwapEnabled())
   {
      for (uint32 i=0; i<numVals; i++)
      {
         uint8 * wBase = &writeTo[i*bytesPerRect];
#if B_HOST_IS_BENDIAN
         for (uint32 j=0; j<4; j++) muscleCopyOut(wBase+(j*sizeof(int32)), B_HOST_TO_LENDIAN_IFLOAT(vals[i][j]));
#else
         for (uint32 j=0; j<4; j++) muscleCopyOut(wBase+(j*sizeof(int32)), B_HOST_TO_BENDIAN_IFLOAT(vals[i][j]));
#endif
      }
   }
   else 
   {
      for (uint32 i=0; i<numVals; i++)
      {
         uint8 * wBase = &writeTo[i*bytesPerRect];
         for (uint32 j=0; j<4; j++) muscleCopyOut(wBase+(j*sizeof(int32)), vals[i][j]);
      }
   }

   writeByteOffset += numBytes;
   return B_NO_ERROR;
}
  622 
  623 status_t ByteBuffer :: WriteFlat(const Flattenable & val, uint32 & writeByteOffset)
  624 {
  625    if (&val == this) return B_BAD_ARGUMENT;  // don't get cute
  626 
  627    const uint32 numBytes     = val.FlattenedSize();
  628    const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
  629    if (newValidSize > _numValidBytes)
  630    {
  631       status_t ret;
  632       if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
  633    }
  634 
  635    val.Flatten(&_buffer[writeByteOffset]);
  636    writeByteOffset += numBytes;
  637    return B_NO_ERROR;
  638 }
  639 
  640 status_t ByteBuffer :: WriteStrings(const String * vals, uint32 numVals, uint32 & writeByteOffset)
  641 {
  642    uint32 numBytes = 0; for (uint32 i=0; i<numVals; i++) numBytes += vals[i].FlattenedSize();
  643    const uint32 newValidSize = muscleMax(_numValidBytes, writeByteOffset+numBytes);
  644    if (newValidSize > _numValidBytes)
  645    {
  646       status_t ret;
  647       if (SetNumBytesWithExtraSpace(newValidSize, true).IsError(ret)) return ret;
  648    }
  649 
  650    for (uint32 i=0; i<numVals; i++)
  651    {
  652       vals[i].Flatten(&_buffer[writeByteOffset]);
  653       writeByteOffset += vals[i].FlattenedSize();
  654    }
  655    return B_NO_ERROR;
  656 }
  657 
  658 } // end namespace muscle
  659