de.bixilon.kotlinglm.func.func_Integer.kt

package de.bixilon.kotlinglm.func

import de.bixilon.kotlinglm.*
import kool.BYTES
import unsigned.toULong

/**
 * Created by GBarbieri on 06.04.2017.
 */

interface func_Integer {

    //
//    // uaddCarry
//    GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry)
//    {
//        uint64 const Value64(static_cast<uint64>(x) + static_cast<uint64>(y));
//        uint64 const Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
//        Carry = Value64 > Max32 ? 1u : 0u;
//        return static_cast<uint32>(Value64 % (Max32 + static_cast<uint64>(1)));
//    }
//
//    template<length_t L, qualifier Q>
//    GLM_FUNC_QUALIFIER vec<L, uint, Q> uaddCarry(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& Carry)
//    {
//        vec<L, uint64, Q> Value64(vec<L, uint64, Q>(x) + vec<L, uint64, Q>(y));
//        vec<L, uint64, Q> Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
//        Carry = mix(vec<L, uint, Q>(0), vec<L, uint, Q>(1), greaterThan(Value64, Max32));
//        return vec<L, uint, Q>(Value64 % (Max32 + static_cast<uint64>(1)));
//    }
//
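    // A possible Kotlin port of the scalar uaddCarry reference above; a sketch, not
    // part of the original file. A Pair return stands in for the C++ `Carry`
    // out-parameter, and the arithmetic is widened to Long so the sum stays unsigned.
    fun uaddCarry(x: uint, y: uint): Pair<uint, uint> {
        val value64 = (x.toLong() and 0xFFFFFFFFL) + (y.toLong() and 0xFFFFFFFFL) // unsigned 64-bit sum
        val carry = if (value64 > 0xFFFFFFFFL) 1 else 0                           // 1 if the 32-bit sum overflowed
        return value64.toInt() to carry                                           // (sum mod 2^32, carry)
    }
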
//    // usubBorrow
//    GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow)
//    {
//        GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");
//
//        Borrow = x >= y ? static_cast<uint>(0) : static_cast<uint>(1);
//        if(y >= x)
//            return y - x;
//        else
//            return static_cast<uint32>((static_cast<int64>(1) << static_cast<int64>(32)) + (static_cast<int64>(y) - static_cast<int64>(x)));
//    }
//
//    template<length_t L, qualifier Q>
//    GLM_FUNC_QUALIFIER vec<L, uint, Q> usubBorrow(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& Borrow)
//    {
//        Borrow = mix(vec<L, uint, Q>(1), vec<L, uint, Q>(0), greaterThanEqual(x, y));
//        vec<L, uint, Q> const YgeX(y - x);
//        vec<L, uint, Q> const XgeY(vec<L, uint, Q>((static_cast<int64>(1) << static_cast<int64>(32)) + (vec<L, int64, Q>(y) - vec<L, int64, Q>(x))));
//        return mix(XgeY, YgeX, greaterThanEqual(y, x));
//    }
//
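    // A possible Kotlin port of the scalar usubBorrow reference above; a sketch, not
    // part of the original file. The comparison uses zero-extended Longs because a
    // plain Int comparison would be signed; note the reference computes y - x, and
    // this sketch keeps that quirk.
    fun usubBorrow(x: uint, y: uint): Pair<uint, uint> {
        val xU = x.toLong() and 0xFFFFFFFFL                // zero-extend for an unsigned compare
        val yU = y.toLong() and 0xFFFFFFFFL
        val borrow = if (xU >= yU) 0 else 1                // 1 if the subtraction wraps
        return (yU - xU).toInt() to borrow                 // (y - x) mod 2^32, as in the reference
    }
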
//    // umulExtended
//    GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb)
//    {
//        GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");
//
//        uint64 Value64 = static_cast<uint64>(x) * static_cast<uint64>(y);
//        msb = static_cast<uint>(Value64 >> static_cast<uint64>(32));
//        lsb = static_cast<uint>(Value64);
//    }
//
//    template<length_t L, qualifier Q>
//    GLM_FUNC_QUALIFIER void umulExtended(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& msb, vec<L, uint, Q>& lsb)
//    {
//        GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");
//
//        vec<L, uint64, Q> Value64(vec<L, uint64, Q>(x) * vec<L, uint64, Q>(y));
//        msb = vec<L, uint, Q>(Value64 >> static_cast<uint64>(32));
//        lsb = vec<L, uint, Q>(Value64);
//    }
//
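    // A possible Kotlin port of the scalar umulExtended reference above; a sketch, not
    // part of the original file. The Pair holds (msb, lsb) of the full unsigned product.
    fun umulExtended(x: uint, y: uint): Pair<uint, uint> {
        val value64 = (x.toLong() and 0xFFFFFFFFL) * (y.toLong() and 0xFFFFFFFFL) // full 64-bit unsigned product
        return (value64 ushr 32).toInt() to value64.toInt()                       // (high 32 bits, low 32 bits)
    }
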
//    // imulExtended
//    GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb)
//    {
//        GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");
//
//        int64 Value64 = static_cast<int64>(x) * static_cast<int64>(y);
//        msb = static_cast<int>(Value64 >> static_cast<int64>(32));
//        lsb = static_cast<int>(Value64);
//    }
//
//    template<length_t L, qualifier Q>
//    GLM_FUNC_QUALIFIER void imulExtended(vec<L, int, Q> const& x, vec<L, int, Q> const& y, vec<L, int, Q>& msb, vec<L, int, Q>& lsb)
//    {
//        GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");
//
//        vec<L, int64, Q> Value64(vec<L, int64, Q>(x) * vec<L, int64, Q>(y));
//        lsb = vec<L, int, Q>(Value64 & static_cast<int64>(0xFFFFFFFF));
//        msb = vec<L, int, Q>((Value64 >> static_cast<int64>(32)) & static_cast<int64>(0xFFFFFFFF));
//    }
//
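    // A possible Kotlin port of the scalar imulExtended reference above; a sketch, not
    // part of the original file. Unlike umulExtended the product is signed, so the
    // high word uses an arithmetic (shr), not logical (ushr), shift.
    fun imulExtended(x: Int, y: Int): Pair<Int, Int> {
        val value64 = x.toLong() * y.toLong()                // full 64-bit signed product
        return (value64 shr 32).toInt() to value64.toInt()   // (high 32 bits, low 32 bits)
    }
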
    // bitfieldExtract
    fun bitfieldExtract(value: uint, offset: Int, bits: Int): uint = (value ushr offset) and GLM.detail.mask(bits)
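    // e.g. bitfieldExtract(0b110101, 2, 3) == 0b101: three bits, starting at bit 2.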

    //    template<length_t L, typename T, qualifier Q>
//    GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldExtract(vec<L, T, Q> const& Value, int Offset, int Bits)
//    {
//        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldExtract' only accept integer inputs");
//
//        return (Value >> static_cast<T>(Offset)) & static_cast<T>(detail::mask(Bits));
//    }
//
    // bitfieldInsert
    fun bitfieldInsert(base: uint, insert: uint, offset: Int, bits: Int): uint {
        val mask = GLM.detail.mask(bits) shl offset
        return (base and mask.inv()) or (insert and mask)
    }
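    // e.g. bitfieldInsert(0b10001, 0b01100, 2, 3) == 0b01101: bits 2..4 of `insert`
    // replace bits 2..4 of `base`. As the GLM reference below shows, the masked bits
    // of `insert` are taken in place, not shifted up from bit 0.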

    //    template<length_t L, typename T, qualifier Q>
//    GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldInsert(vec<L, T, Q> const& Base, vec<L, T, Q> const& Insert, int Offset, int Bits)
//    {
//        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldInsert' only accept integer values");
//
//        T const Mask = static_cast<T>(detail::mask(Bits) << Offset);
//        return (Base & ~Mask) | (Insert & Mask);
//    }
//
    // bitfieldReverse
    fun bitfieldReverse(v: uint): uint {
        var x = GLM.detail.compute_bitfieldReverseStep(v, 0x5555555555555555, 1)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x3333333333333333, 2)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x0F0F0F0F0F0F0F0F, 4)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x00FF00FF00FF00FF, 8)
        return GLM.detail.compute_bitfieldReverseStep(x, 0x0000FFFF0000FFFF, 16)
    }
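    // e.g. bitfieldReverse(1) == Int.MIN_VALUE: bit 0 moves to bit 31.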

    fun bitfieldReverse(v: ulong): ulong {
        var x = GLM.detail.compute_bitfieldReverseStep(v, 0x5555555555555555, 1)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x3333333333333333, 2)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x0F0F0F0F0F0F0F0F, 4)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x00FF00FF00FF00FF, 8)
        x = GLM.detail.compute_bitfieldReverseStep(x, 0x0000FFFF0000FFFF, 16)
        return GLM.detail.compute_bitfieldReverseStep(x, 0x00000000FFFFFFFF, 32)
    }
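    // e.g. bitfieldReverse(1L) == Long.MIN_VALUE: bit 0 moves to bit 63.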

    //    fun bitCount(byte: Byte) = java.lang.Integer.bitCount(byte.i)
//    fun bitCount(short: Short) = java.lang.Integer.bitCount(short.i)
    fun bitCount(int: Int) = int.bitCount

    fun bitCount(long: Long) = long.bitCount
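    // e.g. bitCount(0b1011) == 3, assuming the .bitCount extension wraps
    // java.lang.Integer.bitCount / java.lang.Long.bitCount as the lines above suggest.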
}

interface detail_Integer {

    fun mask(bits: Int) = if (bits >= Int.BYTES * 8) 0.inv() else (1 shl bits) - 1
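    // e.g. mask(3) == 0b111; mask(32) == -1 (0.inv()), i.e. all 32 bits set.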

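    // Each step swaps the bit groups selected by `mask` with the neighbouring groups
    // `shift` bits away; chaining the steps with doubling shifts reverses the word.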
    fun compute_bitfieldReverseStep(v: uint, mask: ulong, shift: uint): uint {
        val vL = v.toULong()
        return (((vL and mask) shl shift) or ((vL and mask.inv()) ushr shift)).i
    }

    fun compute_bitfieldReverseStep(v: ulong, mask: ulong, shift: uint): ulong = ((v and mask) shl shift) or ((v and mask.inv()) ushr shift)
}



