Remove Buffer/BitVectors from compiler/C rts

Edwin Brady 2015-03-28 18:29:59 +00:00
parent 73ea02b205
commit 847296659b
15 changed files with 2 additions and 757 deletions


@@ -822,71 +822,3 @@ VAL idris_b64T32(VM *vm, VAL a) {
return cl;
}
// SSE vectors
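/* Indexing idiom used by the four IDX functions below: there is no direct
   lane-extract operation here, so _mm_storeu_si128 spills the vector to a
   small stack array of the lane type and plain array indexing picks out the
   requested element. */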
VAL idris_IDXB8x16(VM* vm, VAL vec, VAL idx) {
__m128i sse = *vec->info.bits128p;
uint8_t data[16];
_mm_storeu_si128((__m128i*)&data, sse);
return MKB8(vm, data[idx->info.bits32]);
}
VAL idris_IDXB16x8(VM* vm, VAL vec, VAL idx) {
__m128i sse = *vec->info.bits128p;
uint16_t data[8];
_mm_storeu_si128((__m128i*)&data, sse);
return MKB16(vm, data[idx->info.bits32]);
}
VAL idris_IDXB32x4(VM* vm, VAL vec, VAL idx) {
__m128i sse = *vec->info.bits128p;
uint32_t data[4];
_mm_storeu_si128((__m128i*)&data, sse);
return MKB32(vm, data[idx->info.bits32]);
}
VAL idris_IDXB64x2(VM* vm, VAL vec, VAL idx) {
__m128i sse = *vec->info.bits128p;
uint64_t data[2];
_mm_storeu_si128((__m128i*)&data, sse);
return MKB64(vm, data[idx->info.bits32]);
}
VAL idris_b8x16CopyForGC(VM *vm, VAL vec) {
__m128i sse = *vec->info.bits128p;
VAL cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1);
SETTY(cl, BITS8X16);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
*cl->info.bits128p = sse;
return cl;
}
VAL idris_b16x8CopyForGC(VM *vm, VAL vec) {
__m128i sse = *vec->info.bits128p;
VAL cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1);
SETTY(cl, BITS16X8);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
*cl->info.bits128p = sse;
return cl;
}
VAL idris_b32x4CopyForGC(VM *vm, VAL vec) {
__m128i sse = *vec->info.bits128p;
VAL cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1);
SETTY(cl, BITS32X4);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
*cl->info.bits128p = sse;
return cl;
}
VAL idris_b64x2CopyForGC(VM *vm, VAL vec) {
__m128i sse = *vec->info.bits128p;
VAL cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1);
SETTY(cl, BITS64X2);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
*cl->info.bits128p = sse;
return cl;
}
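The four CopyForGC helpers above (and the MK*const constructors further down) rely on the same alignment idiom: over-allocate by a full 16 bytes of padding, then round the payload pointer up so the __m128i slot is 16-byte aligned wherever the allocator happened to place the closure. A minimal standalone sketch of the idiom, with ALIGN written out explicitly since the RTS macro's definition isn't part of this diff (assumed to be the usual round-up for power-of-two boundaries):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Assumed equivalent of the RTS ALIGN macro: round addr up to the next
   multiple of align, where align is a power of two. */
#define ALIGN(addr, align) (((addr) + (align) - 1) & ~(uintptr_t)((align) - 1))

/* Place a 16-byte-aligned payload after a header, as the BITS*X* closures
   do with sizeof(Closure) + 16 + sizeof(__m128i). */
static void *aligned_payload(unsigned char *base, size_t header) {
    unsigned char *p = (unsigned char *)ALIGN((uintptr_t)(base + header), 16);
    assert((uintptr_t)p % 16 == 0); /* the invariant the asserts above check */
    return p;
}

int main(void) {
    unsigned char *base = malloc(64 + 16 + 16); /* header + padding + payload */
    if (base) { (void)aligned_payload(base, 64); free(base); }
    return 0;
}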


@@ -126,14 +126,4 @@ VAL idris_b64T8(VM *vm, VAL a);
VAL idris_b64T16(VM *vm, VAL a);
VAL idris_b64T32(VM *vm, VAL a);
VAL idris_IDXB8x16(VM* vm, VAL vec, VAL idx);
VAL idris_IDXB16x8(VM* vm, VAL vec, VAL idx);
VAL idris_IDXB32x4(VM* vm, VAL vec, VAL idx);
VAL idris_IDXB64x2(VM* vm, VAL vec, VAL idx);
VAL idris_b8x16CopyForGC(VM *vm, VAL a);
VAL idris_b16x8CopyForGC(VM *vm, VAL a);
VAL idris_b32x4CopyForGC(VM *vm, VAL a);
VAL idris_b64x2CopyForGC(VM *vm, VAL a);
#endif


@@ -30,9 +30,6 @@ VAL copy(VM* vm, VAL x) {
case STROFFSET:
cl = MKSTROFFc(vm, x->info.str_offset);
break;
case BUFFER:
cl = MKBUFFERc(vm, x->info.buf);
break;
case BIGINT:
cl = MKBIGMc(vm, x->info.ptr);
break;
@@ -54,18 +51,6 @@ VAL copy(VM* vm, VAL x) {
case BITS64:
cl = idris_b64CopyForGC(vm, x);
break;
case BITS8X16:
cl = idris_b8x16CopyForGC(vm, x);
break;
case BITS16X8:
cl = idris_b16x8CopyForGC(vm, x);
break;
case BITS32X4:
cl = idris_b32x4CopyForGC(vm, x);
break;
case BITS64X2:
cl = idris_b64x2CopyForGC(vm, x);
break;
case FWD:
return x->info.ptr;
default:


@@ -327,107 +327,6 @@ VAL MKB64(VM* vm, uint64_t bits64) {
return cl;
}
VAL MKB8x16const(VM* vm,
uint8_t v0, uint8_t v1, uint8_t v2, uint8_t v3,
uint8_t v4, uint8_t v5, uint8_t v6, uint8_t v7,
uint8_t v8, uint8_t v9, uint8_t v10, uint8_t v11,
uint8_t v12, uint8_t v13, uint8_t v14, uint8_t v15) {
Closure* cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1); // 16 bytes of padding so ALIGN stays in bounds
SETTY(cl, BITS8X16);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
uint8_t data[16];
data[0]=v0; data[1]=v1; data[2]=v2; data[3]=v3;
data[4]=v4; data[5]=v5; data[6]=v6; data[7]=v7;
data[8]=v8; data[9]=v9; data[10]=v10; data[11]=v11;
data[12]=v12; data[13]=v13; data[14]=v14; data[15]=v15;
*cl->info.bits128p = _mm_loadu_si128((__m128i*)&data);
return cl;
}
VAL MKB8x16(VM* vm,
VAL v0, VAL v1, VAL v2, VAL v3,
VAL v4, VAL v5, VAL v6, VAL v7,
VAL v8, VAL v9, VAL v10, VAL v11,
VAL v12, VAL v13, VAL v14, VAL v15) {
return MKB8x16const(vm,
v0->info.bits8, v1->info.bits8, v2->info.bits8, v3->info.bits8,
v4->info.bits8, v5->info.bits8, v6->info.bits8, v7->info.bits8,
v8->info.bits8, v9->info.bits8, v10->info.bits8, v11->info.bits8,
v12->info.bits8, v13->info.bits8, v14->info.bits8, v15->info.bits8);
}
VAL MKB16x8const(VM* vm,
uint16_t v0, uint16_t v1, uint16_t v2, uint16_t v3,
uint16_t v4, uint16_t v5, uint16_t v6, uint16_t v7) {
Closure* cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1); // 16 bytes of padding so ALIGN stays in bounds
SETTY(cl, BITS16X8);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
uint16_t data[8];
data[0]=v0; data[1]=v1; data[2]=v2; data[3]=v3;
data[4]=v4; data[5]=v5; data[6]=v6; data[7]=v7;
*cl->info.bits128p = _mm_loadu_si128((__m128i*)&data);
return cl;
}
VAL MKB16x8(VM* vm,
VAL v0, VAL v1, VAL v2, VAL v3,
VAL v4, VAL v5, VAL v6, VAL v7) {
return MKB16x8const(vm,
v0->info.bits16, v1->info.bits16, v2->info.bits16, v3->info.bits16,
v4->info.bits16, v5->info.bits16, v6->info.bits16, v7->info.bits16);
}
VAL MKB32x4const(VM* vm,
uint32_t v0, uint32_t v1, uint32_t v2, uint32_t v3) {
Closure* cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1);
SETTY(cl, BITS32X4);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
uint32_t data[4];
data[0]=v0; data[1]=v1; data[2]=v2; data[3]=v3;
*cl->info.bits128p = _mm_loadu_si128((__m128i*)&data);
return cl;
}
VAL MKB32x4(VM* vm,
VAL v0, VAL v1, VAL v2, VAL v3) {
return MKB32x4const(vm,
v0->info.bits32, v1->info.bits32, v2->info.bits32, v3->info.bits32);
}
VAL MKB64x2const(VM* vm, uint64_t v0, uint64_t v1) {
Closure* cl = allocate(sizeof(Closure) + 16 + sizeof(__m128i), 1);
SETTY(cl, BITS64X2);
cl->info.bits128p = (__m128i*)ALIGN((uintptr_t)cl + sizeof(Closure), 16);
assert ((uintptr_t)cl->info.bits128p % 16 == 0);
uint64_t data[2];
data[0]=v0; data[1]=v1;
*cl->info.bits128p = _mm_loadu_si128((__m128i*)&data);
return cl;
}
VAL MKB64x2(VM* vm, VAL v0, VAL v1) {
return MKB64x2const(vm,
v0->info.bits64,
v1->info.bits64);
}
void PROJECT(VM* vm, VAL r, int loc, int arity) {
int i;
for(i = 0; i < arity; ++i) {
@@ -740,240 +639,6 @@ VAL idris_systemInfo(VM* vm, VAL index) {
return MKSTR(vm, "");
}
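/* Duplicates a buffer for the GC: the clone keeps the original capacity, but
   only the Buffer header plus the filled prefix actually needs copying. */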
VAL MKBUFFERc(VM* vm, Buffer* buf) {
Closure* cl = allocate(sizeof(Closure) + sizeof *buf + buf->cap, 1);
SETTY(cl, BUFFER);
cl->info.buf = (Buffer*)((void*)cl + sizeof(Closure));
memmove(cl->info.buf, buf, sizeof *buf + buf->fill);
return cl;
}
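/* internal_allocate rounds its request up to the next power of two by
   smearing the top set bit into every lower bit and adding one.  Worked
   example for size = 100: 100-1 = 99 = 0b1100011; after the |= passes every
   bit below the leading one is set, giving 0b1111111 = 127; 127+1 = 128. */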
static VAL internal_allocate(VM *vm, size_t hint) {
size_t size = hint + sizeof(Closure) + sizeof(Buffer);
// Round up to a power of 2
--size;
size_t i;
for (i = 0; (1u << i) < sizeof(size) * 8; ++i) // shift distances 1,2,4,... capped below the word width
size |= size >> (1 << i);
++size;
Closure* cl = allocate(size, 0);
SETTY(cl, BUFFER);
cl->info.buf = (Buffer*)((void*)cl + sizeof(Closure));
cl->info.buf->cap = size - (sizeof(Closure) + sizeof(Buffer));
return cl;
}
// The following functions cast uint64_t to size_t, which may narrow!
VAL idris_buffer_allocate(VM* vm, VAL hint) {
Closure* cl = internal_allocate(vm, hint->info.bits64);
cl->info.buf->fill = 0;
return cl;
}
// Despite its name, this fills dest with num back-to-back copies of the
// size-byte pattern at src; memset proper only handles one-byte patterns.
static void internal_memset(void *dest, const void *src, size_t size, size_t num) {
unsigned char *d = dest;
while (num--) {
memmove(d, src, size);
d += size;
}
}
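/* Buffers are copy-on-write: an append may reuse the buffer only when it
   lands exactly at the current fill (no other closure has already appended
   past this view) and the result still fits within cap; otherwise the live
   prefix is copied into a fresh, larger allocation. */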
static VAL internal_prepare_append(VM* vm, VAL buf, size_t bufLen, size_t appLen) {
size_t totalLen = bufLen + appLen;
Closure* cl;
if (bufLen != buf->info.buf->fill ||
totalLen > buf->info.buf->cap) {
// We're not appending at the fill point, or we're over cap, so we need a new buffer
cl = internal_allocate(vm, totalLen);
memmove(cl->info.buf->store,
buf->info.buf->store,
bufLen);
cl->info.buf->fill = totalLen;
} else {
// Hooray, can just bump the fill
cl = buf;
cl->info.buf->fill += appLen;
}
return cl;
}
VAL idris_appendBuffer(VM* vm, VAL fst, VAL fstLen, VAL cnt, VAL sndLen, VAL sndOff, VAL snd) {
size_t firstLength = fstLen->info.bits64;
size_t secondLength = sndLen->info.bits64;
size_t count = cnt->info.bits64;
size_t offset = sndOff->info.bits64;
Closure* cl = internal_prepare_append(vm, fst, firstLength, count * secondLength);
internal_memset(cl->info.buf->store + firstLength, snd->info.buf->store + offset, secondLength, count);
return cl;
}
// Special cased because we can use memset
VAL idris_appendB8Native(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
size_t bufLen = len->info.bits64;
size_t count = cnt->info.bits64;
Closure* cl = internal_prepare_append(vm, buf, bufLen, count);
memset(cl->info.buf->store + bufLen, val->info.bits8, count);
return cl;
}
static VAL internal_append_bits(VM* vm, VAL buf, VAL bufLen, VAL cnt, const void* val, size_t val_len) {
size_t len = bufLen->info.bits64;
size_t count = cnt->info.bits64;
Closure* cl = internal_prepare_append(vm, buf, len, count * val_len);
internal_memset(cl->info.buf->store + len, val, val_len, count);
return cl;
}
VAL idris_appendB16Native(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
return internal_append_bits(vm, buf, len, cnt, &val->info.bits16, sizeof val->info.bits16);
}
VAL idris_appendB16LE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
// On gcc 4.8 -O3 compiling for x86_64, using leVal like this is
// optimized away. Presumably the same holds for other sizes and,
// conversely, on BE systems.
unsigned char leVal[] = { val->info.bits16
, (val->info.bits16 >> 8)
};
return internal_append_bits(vm, buf, len, cnt, leVal, sizeof leVal);
}
VAL idris_appendB16BE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
unsigned char beVal[] = { (val->info.bits16 >> 8)
, val->info.bits16
};
return internal_append_bits(vm, buf, len, cnt, beVal, sizeof beVal);
}
VAL idris_appendB32Native(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
return internal_append_bits(vm, buf, len, cnt, &val->info.bits32, sizeof val->info.bits32);
}
VAL idris_appendB32LE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
unsigned char leVal[] = { val->info.bits32
, (val->info.bits32 >> 8)
, (val->info.bits32 >> 16)
, (val->info.bits32 >> 24)
};
return internal_append_bits(vm, buf, len, cnt, leVal, sizeof leVal);
}
VAL idris_appendB32BE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
unsigned char beVal[] = { (val->info.bits32 >> 24)
, (val->info.bits32 >> 16)
, (val->info.bits32 >> 8)
, val->info.bits32
};
return internal_append_bits(vm, buf, len, cnt, beVal, sizeof beVal);
}
VAL idris_appendB64Native(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
return internal_append_bits(vm, buf, len, cnt, &val->info.bits64, sizeof val->info.bits64);
}
VAL idris_appendB64LE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
unsigned char leVal[] = { val->info.bits64
, (val->info.bits64 >> 8)
, (val->info.bits64 >> 16)
, (val->info.bits64 >> 24)
, (val->info.bits64 >> 32)
, (val->info.bits64 >> 40)
, (val->info.bits64 >> 48)
, (val->info.bits64 >> 56)
};
return internal_append_bits(vm, buf, len, cnt, leVal, sizeof leVal);
}
VAL idris_appendB64BE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val) {
unsigned char beVal[] = { (val->info.bits64 >> 56)
, (val->info.bits64 >> 48)
, (val->info.bits64 >> 40)
, (val->info.bits64 >> 32)
, (val->info.bits64 >> 24)
, (val->info.bits64 >> 16)
, (val->info.bits64 >> 8)
, val->info.bits64
};
return internal_append_bits(vm, buf, len, cnt, beVal, sizeof beVal);
}
VAL idris_peekB8Native(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
uint8_t *val = buf->info.buf->store + offset;
return MKB8(vm, *val);
}
VAL idris_peekB16Native(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
uint16_t *val = (uint16_t *) (buf->info.buf->store + offset);
return MKB16(vm, *val);
}
VAL idris_peekB16LE(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
return MKB16(vm, ((uint16_t) buf->info.buf->store[offset]) +
(((uint16_t) buf->info.buf->store[offset + 1]) << 8));
}
VAL idris_peekB16BE(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
return MKB16(vm, ((uint16_t) buf->info.buf->store[offset + 1]) +
(((uint16_t) buf->info.buf->store[offset]) << 8));
}
VAL idris_peekB32Native(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
uint32_t *val = (uint32_t *) (buf->info.buf->store + offset);
return MKB32(vm, *val);
}
VAL idris_peekB32LE(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
return MKB32(vm, ((uint32_t) buf->info.buf->store[offset]) +
(((uint32_t) buf->info.buf->store[offset + 1]) << 8) +
(((uint32_t) buf->info.buf->store[offset + 2]) << 16) +
(((uint32_t) buf->info.buf->store[offset + 3]) << 24));
}
VAL idris_peekB32BE(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
return MKB32(vm, ((uint32_t) buf->info.buf->store[offset + 3]) +
(((uint32_t) buf->info.buf->store[offset + 2]) << 8) +
(((uint32_t) buf->info.buf->store[offset + 1]) << 16) +
(((uint32_t) buf->info.buf->store[offset]) << 24));
}
VAL idris_peekB64Native(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
uint64_t *val = (uint64_t *) (buf->info.buf->store + offset);
return MKB64(vm, *val);
}
VAL idris_peekB64LE(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
return MKB64(vm, ((uint64_t) buf->info.buf->store[offset]) +
(((uint64_t) buf->info.buf->store[offset + 1]) << 8) +
(((uint64_t) buf->info.buf->store[offset + 2]) << 16) +
(((uint64_t) buf->info.buf->store[offset + 3]) << 24) +
(((uint64_t) buf->info.buf->store[offset + 4]) << 32) +
(((uint64_t) buf->info.buf->store[offset + 5]) << 40) +
(((uint64_t) buf->info.buf->store[offset + 6]) << 48) +
(((uint64_t) buf->info.buf->store[offset + 7]) << 56));
}
VAL idris_peekB64BE(VM* vm, VAL buf, VAL off) {
size_t offset = off->info.bits64;
return MKB64(vm, ((uint64_t) buf->info.buf->store[offset + 7]) +
(((uint64_t) buf->info.buf->store[offset + 6]) << 8) +
(((uint64_t) buf->info.buf->store[offset + 5]) << 16) +
(((uint64_t) buf->info.buf->store[offset + 4]) << 24) +
(((uint64_t) buf->info.buf->store[offset + 3]) << 32) +
(((uint64_t) buf->info.buf->store[offset + 2]) << 40) +
(((uint64_t) buf->info.buf->store[offset + 1]) << 48) +
(((uint64_t) buf->info.buf->store[offset]) << 56));
}
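The append*/peek* pairs above are inverses by construction: the LE variants emit the least significant byte first and the peeks reassemble with widening shifts, while the BE variants do the same in the opposite byte order. A standalone round-trip illustration, independent of the RTS types:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t val = 0xDEADBEEFu;
    /* encode little-endian, as idris_appendB32LE does */
    unsigned char store[4] = { (unsigned char)val,
                               (unsigned char)(val >> 8),
                               (unsigned char)(val >> 16),
                               (unsigned char)(val >> 24) };
    /* decode with widening shifts, as idris_peekB32LE does */
    uint32_t back = (uint32_t)store[0]
                  | ((uint32_t)store[1] << 8)
                  | ((uint32_t)store[2] << 16)
                  | ((uint32_t)store[3] << 24);
    printf("%s\n", back == val ? "round trip ok" : "mismatch");
    return 0;
}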
typedef struct {
VM* vm; // thread's VM
VM* callvm; // calling thread's VM
@@ -1058,9 +723,6 @@ VAL doCopyTo(VM* vm, VAL x) {
case STRING:
cl = MKSTRc(vm, x->info.str);
break;
case BUFFER:
cl = MKBUFFERc(vm, x->info.buf);
break;
case BIGINT:
cl = MKBIGMc(vm, x->info.ptr);
break;


@@ -26,8 +26,7 @@
typedef enum {
CON, INT, BIGINT, FLOAT, STRING, STROFFSET,
BITS8, BITS16, BITS32, BITS64, UNIT, PTR, FWD,
MANAGEDPTR, BUFFER, BITS8X16, BITS16X8, BITS32X4,
BITS64X2
MANAGEDPTR
} ClosureType;
typedef struct Closure *VAL;
@@ -208,37 +207,12 @@ VAL MKB16(VM* vm, uint16_t b);
VAL MKB32(VM* vm, uint32_t b);
VAL MKB64(VM* vm, uint64_t b);
// SSE Vectors
VAL MKB8x16(VM* vm,
VAL v0, VAL v1, VAL v2, VAL v3,
VAL v4, VAL v5, VAL v6, VAL v7,
VAL v8, VAL v9, VAL v10, VAL v11,
VAL v12, VAL v13, VAL v14, VAL v15);
VAL MKB8x16const(VM* vm,
uint8_t v0, uint8_t v1, uint8_t v2, uint8_t v3,
uint8_t v4, uint8_t v5, uint8_t v6, uint8_t v7,
uint8_t v8, uint8_t v9, uint8_t v10, uint8_t v11,
uint8_t v12, uint8_t v13, uint8_t v14, uint8_t v15);
VAL MKB16x8(VM* vm,
VAL v0, VAL v1, VAL v2, VAL v3,
VAL v4, VAL v5, VAL v6, VAL v7);
VAL MKB16x8const(VM* vm,
uint16_t v0, uint16_t v1, uint16_t v2, uint16_t v3,
uint16_t v4, uint16_t v5, uint16_t v6, uint16_t v7);
VAL MKB32x4(VM* vm,
VAL v0, VAL v1, VAL v2, VAL v3);
VAL MKB32x4const(VM* vm,
uint32_t v0, uint32_t v1, uint32_t v2, uint32_t v3);
VAL MKB64x2(VM* vm, VAL v0, VAL v1);
VAL MKB64x2const(VM* vm, uint64_t v0, uint64_t v1);
// following versions don't take a lock when allocating
VAL MKFLOATc(VM* vm, double val);
VAL MKSTROFFc(VM* vm, StrOffset* off);
VAL MKSTRc(VM* vm, char* str);
VAL MKPTRc(VM* vm, void* ptr);
VAL MKMPTRc(VM* vm, void* ptr, size_t size);
VAL MKBUFFERc(VM* vm, Buffer* buf);
char* GETSTROFF(VAL stroff);
@@ -341,9 +315,6 @@ VAL idris_strCons(VM* vm, VAL x, VAL xs);
VAL idris_strIndex(VM* vm, VAL str, VAL i);
VAL idris_strRev(VM* vm, VAL str);
// Buffer primitives
VAL idris_buffer_allocate(VM* vm, VAL hint);
VAL idris_appendBuffer(VM* vm, VAL fst, VAL fstLen, VAL cnt, VAL sndLen, VAL sndOff, VAL snd);
VAL idris_appendB8Native(VM* vm, VAL buf, VAL len, VAL cnt, VAL val);
VAL idris_appendB16Native(VM* vm, VAL buf, VAL len, VAL cnt, VAL val);
VAL idris_appendB16LE(VM* vm, VAL buf, VAL len, VAL cnt, VAL val);


@@ -182,10 +182,6 @@ bcc i (ASSIGNCONST l c)
mkConst (B16 x) = "idris_b16const(vm, " ++ show x ++ "U)"
mkConst (B32 x) = "idris_b32const(vm, " ++ show x ++ "UL)"
mkConst (B64 x) = "idris_b64const(vm, " ++ show x ++ "ULL)"
mkConst (B8V x) = let x' = V.toList x in "MKB8x16const(vm, " ++ intercalate ", " (map (\elem -> show elem ++ "U") x') ++ ")"
mkConst (B16V x) = let x' = V.toList x in "MKB16x8const(vm, " ++ intercalate ", " (map (\elem -> show elem ++ "U") x') ++ ")"
mkConst (B32V x) = let x' = V.toList x in "MKB32x4const(vm, " ++ intercalate ", " (map (\elem -> show elem ++ "UL") x') ++ ")"
mkConst (B64V x) = let x' = V.toList x in "MKB64x2const(vm, " ++ intercalate ", " (map (\elem -> show elem ++ "ULL") x') ++ ")"
-- if it's a type constant, we won't use it, but equally it shouldn't
-- report an error. These might creep into generated for various reasons
-- (especially if erasure is disabled).
@@ -556,11 +552,6 @@ doOp v LStrIndex [x, y] = v ++ "idris_strIndex(vm, " ++ creg x ++ "," ++ creg y
doOp v LStrRev [x] = v ++ "idris_strRev(vm, " ++ creg x ++ ")"
doOp v LStrLen [x] = v ++ "idris_strlen(vm, " ++ creg x ++ ")"
doOp v LAllocate [x] = v ++ "idris_buffer_allocate(vm, " ++ creg x ++ ")"
doOp v LAppendBuffer [a, b, c, d, e, f] = v ++ "idris_appendBuffer(vm, " ++ creg a ++ "," ++ creg b ++ "," ++ creg c ++ "," ++ creg d ++ "," ++ creg e ++ "," ++ creg f ++ ")"
doOp v (LAppend ity en) [a, b, c, d] = v ++ "idris_append" ++ intTyName ity ++ show en ++ "(vm, " ++ creg a ++ "," ++ creg b ++ "," ++ creg c ++ "," ++ creg d ++ ")"
doOp v (LPeek ity en) [x, y] = v ++ "idris_peek" ++ intTyName ity ++ show en ++ "(vm, " ++ creg x ++ "," ++ creg y ++ ")"
doOp v LStdIn [] = v ++ "MKPTR(vm, stdin)"
doOp v LStdOut [] = v ++ "MKPTR(vm, stdout)"
doOp v LStdErr [] = v ++ "MKPTR(vm, stderr)"
@@ -576,16 +567,6 @@ doOp v (LChInt ITChar) args = doOp v (LChInt ITNative) args
doOp v (LIntCh ITNative) args = v ++ creg (last args)
doOp v (LIntCh ITChar) args = doOp v (LIntCh ITNative) args
doOp v c@(LMkVec IT8 _) args = v ++ "MKB8x16(vm, " ++ (intercalate ", " (map creg args)) ++ ")"
doOp v c@(LMkVec IT16 _) args = v ++ "MKB16x8(vm, " ++ (intercalate ", " (map creg args)) ++ ")"
doOp v c@(LMkVec IT32 _) args = v ++ "MKB32x4(vm, " ++ (intercalate ", " (map creg args)) ++ ")"
doOp v c@(LMkVec IT64 _) args = v ++ "MKB64x2(vm, " ++ (intercalate ", " (map creg args)) ++ ")"
doOp v c@(LIdxVec IT8 _) [p, i] = v ++ "idris_IDXB8x16(vm, " ++ creg p ++ ", " ++ creg i ++ ")"
doOp v c@(LIdxVec IT16 _) [p, i] = v ++ "idris_IDXB16x8(vm, " ++ creg p ++ ", " ++ creg i ++ ")"
doOp v c@(LIdxVec IT32 _) [p, i] = v ++ "idris_IDXB32x4(vm, " ++ creg p ++ ", " ++ creg i ++ ")"
doOp v c@(LIdxVec IT64 _) [p, i] = v ++ "idris_IDXB64x2(vm, " ++ creg p ++ ", " ++ creg i ++ ")"
doOp v LSystemInfo [x] = v ++ "idris_systemInfo(vm, " ++ creg x ++ ")"
doOp v LNoOp args = v ++ creg (last args)
doOp _ op args = error $ "doOp of (" ++ show op ++ ") not implemented, arguments (" ++ show args ++ ")"


@@ -75,22 +75,11 @@ data PrimFn = LPlus ArithTy | LMinus ArithTy | LTimes ArithTy
| LFExp | LFLog | LFSin | LFCos | LFTan | LFASin | LFACos | LFATan
| LFSqrt | LFFloor | LFCeil | LFNegate
-- construction element extraction element insertion
| LMkVec NativeTy Int | LIdxVec NativeTy Int | LUpdateVec NativeTy Int
| LStrHead | LStrTail | LStrCons | LStrIndex | LStrRev
| LStdIn | LStdOut | LStdErr
-- Buffers
| LAllocate
| LAppendBuffer
-- system info
| LSystemInfo
-- Note that for Bits8 only Native endianness is actually used
-- and the user-exposed interface for Bits8 doesn't mention
-- endianness
| LAppend IntTy Endianness
| LPeek IntTy Endianness
| LFork
| LPar -- evaluate argument anywhere, possibly on another


@@ -350,11 +350,6 @@ instance Binary Const where
putWord8 (fromIntegral . fromEnum $ ity)
putWord8 (fromIntegral count)
B8V x1 -> putWord8 21 >> put x1
B16V x1 -> putWord8 22 >> put x1
B32V x1 -> putWord8 23 >> put x1
B64V x1 -> putWord8 24 >> put x1
BufferType -> putWord8 25
ManagedPtrType -> putWord8 26
VoidType -> putWord8 27
WorldType -> putWord8 28
@@ -395,11 +390,6 @@ instance Binary Const where
c <- getWord8
return (AType (ATInt (ITVec (toEnum . fromIntegral $ e) (fromIntegral c))))
21 -> fmap B8V get
22 -> fmap B16V get
23 -> fmap B32V get
24 -> fmap B64V get
25 -> return BufferType
26 -> return ManagedPtrType
27 -> return VoidType
28 -> return WorldType


@@ -321,10 +321,6 @@ isConstType (B8 _) (AType (ATInt _)) = True
isConstType (B16 _) (AType (ATInt _)) = True
isConstType (B32 _) (AType (ATInt _)) = True
isConstType (B64 _) (AType (ATInt _)) = True
isConstType (B8V _) (AType (ATInt _)) = True
isConstType (B16V _) (AType (ATInt _)) = True
isConstType (B32V _) (AType (ATInt _)) = True
isConstType (B64V _) (AType (ATInt _)) = True
isConstType _ _ = False
data Pat = PCon Bool Name Int [Pat]


@@ -150,17 +150,12 @@ instance NFData Const where
rnf (B16 x1) = rnf x1 `seq` ()
rnf (B32 x1) = rnf x1 `seq` ()
rnf (B64 x1) = rnf x1 `seq` ()
rnf (B8V x1) = rnf x1 `seq` ()
rnf (B16V x1) = rnf x1 `seq` ()
rnf (B32V x1) = rnf x1 `seq` ()
rnf (B64V x1) = rnf x1 `seq` ()
rnf (AType x1) = rnf x1 `seq` ()
rnf WorldType = ()
rnf TheWorld = ()
rnf StrType = ()
rnf PtrType = ()
rnf ManagedPtrType = ()
rnf BufferType = ()
rnf VoidType = ()
rnf Forgot = ()


@@ -565,11 +565,9 @@ intTyWidth ITBig = error "IRTS.Lang.intTyWidth: Big integers have variable width
data Const = I Int | BI Integer | Fl Double | Ch Char | Str String
| B8 Word8 | B16 Word16 | B32 Word32 | B64 Word64
| B8V (Vector Word8) | B16V (Vector Word16)
| B32V (Vector Word32) | B64V (Vector Word64)
| AType ArithTy | StrType
| WorldType | TheWorld
| PtrType | ManagedPtrType | BufferType | VoidType | Forgot
| PtrType | ManagedPtrType | VoidType | Forgot
deriving (Eq, Ord, Data, Typeable)
{-!
deriving instance Binary Const
@@ -580,7 +578,6 @@ isTypeConst :: Const -> Bool
isTypeConst (AType _) = True
isTypeConst StrType = True
isTypeConst ManagedPtrType = True
isTypeConst BufferType = True
isTypeConst WorldType = True
isTypeConst PtrType = True
isTypeConst VoidType = True
@@ -599,7 +596,6 @@ instance Pretty Const OutputAnnotation where
pretty StrType = text "String"
pretty TheWorld = text "%theWorld"
pretty WorldType = text "prim__World"
pretty BufferType = text "prim__UnsafeBuffer"
pretty PtrType = text "Ptr"
pretty ManagedPtrType = text "Ptr"
pretty VoidType = text "Void"
@@ -620,10 +616,6 @@ constIsType (B8 _) = False
constIsType (B16 _) = False
constIsType (B32 _) = False
constIsType (B64 _) = False
constIsType (B8V _) = False
constIsType (B16V _) = False
constIsType (B32V _) = False
constIsType (B64V _) = False
constIsType _ = True
-- | Get the docstring for a Const
@@ -635,7 +627,6 @@ constDocs c@(AType ATFloat) = "Double-precision floating-point nu
constDocs StrType = "Strings in some unspecified encoding"
constDocs PtrType = "Foreign pointers"
constDocs ManagedPtrType = "Managed pointers"
constDocs BufferType = "Copy-on-write buffers"
constDocs c@(AType (ATInt (ITFixed IT8))) = "Eight bits (unsigned)"
constDocs c@(AType (ATInt (ITFixed IT16))) = "Sixteen bits (unsigned)"
constDocs c@(AType (ATInt (ITFixed IT32))) = "Thirty-two bits (unsigned)"
@@ -657,10 +648,6 @@ constDocs (B32 w) = "The thirty-two-bit value 0x" ++
showIntAtBase 16 intToDigit w ""
constDocs (B64 w) = "The sixty-four-bit value 0x" ++
showIntAtBase 16 intToDigit w ""
constDocs (B8V v) = "A vector of eight-bit values"
constDocs (B16V v) = "A vector of sixteen-bit values"
constDocs (B32V v) = "A vector of thirty-two-bit values"
constDocs (B64V v) = "A vector of sixty-four-bit values"
constDocs prim = "Undocumented"
data Universe = NullType | UniqueType | AllTypes
@@ -1303,10 +1290,6 @@ instance Show Const where
show (B16 x) = show x
show (B32 x) = show x
show (B64 x) = show x
show (B8V x) = "<" ++ intercalate "," (map show (V.toList x)) ++ ">"
show (B16V x) = "<" ++ intercalate "," (map show (V.toList x)) ++ ">"
show (B32V x) = "<" ++ intercalate "," (map show (V.toList x)) ++ ">"
show (B64V x) = "<" ++ intercalate "," (map show (V.toList x)) ++ ">"
show (AType ATFloat) = "Float"
show (AType (ATInt ITBig)) = "Integer"
show (AType (ATInt ITNative)) = "Int"
@@ -1316,7 +1299,6 @@ instance Show Const where
show TheWorld = "prim__TheWorld"
show WorldType = "prim__WorldType"
show StrType = "String"
show BufferType = "prim__UnsafeBuffer"
show PtrType = "Ptr"
show ManagedPtrType = "ManagedPtr"
show VoidType = "Void"


@@ -151,10 +151,6 @@ check' holes ctxt env top = chk (TType (UVar (-5))) env top where
constType (B16 _) = Constant (AType (ATInt (ITFixed IT16)))
constType (B32 _) = Constant (AType (ATInt (ITFixed IT32)))
constType (B64 _) = Constant (AType (ATInt (ITFixed IT64)))
constType (B8V a) = Constant (AType (ATInt (ITVec IT8 (V.length a))))
constType (B16V a) = Constant (AType (ATInt (ITVec IT16 (V.length a))))
constType (B32V a) = Constant (AType (ATInt (ITVec IT32 (V.length a))))
constType (B64V a) = Constant (AType (ATInt (ITVec IT64 (V.length a))))
constType TheWorld = Constant WorldType
constType Forgot = Erased
constType _ = TType (UVal 0)


@@ -2586,10 +2586,6 @@ reflectConstant c@(B8 _) = reflCall "B8" [RConstant c]
reflectConstant c@(B16 _) = reflCall "B16" [RConstant c]
reflectConstant c@(B32 _) = reflCall "B32" [RConstant c]
reflectConstant c@(B64 _) = reflCall "B64" [RConstant c]
reflectConstant (B8V ws) = reflCall "B8V" [mkList (Var (sUN "Bits8")) . map (RConstant . B8) . V.toList $ ws]
reflectConstant (B16V ws) = reflCall "B16V" [mkList (Var (sUN "Bits16")) . map (RConstant . B16) . V.toList $ ws]
reflectConstant (B32V ws) = reflCall "B32V" [mkList (Var (sUN "Bits32")) . map (RConstant . B32) . V.toList $ ws]
reflectConstant (B64V ws) = reflCall "B64V" [mkList (Var (sUN "Bits64")) . map (RConstant . B64) . V.toList $ ws]
reflectConstant (AType (ATInt ITNative)) = reflCall "AType" [reflCall "ATInt" [Var (reflm "ITNative")]]
reflectConstant (AType (ATInt ITBig)) = reflCall "AType" [reflCall "ATInt" [Var (reflm "ITBig")]]
reflectConstant (AType ATFloat) = reflCall "AType" [Var (reflm "ATFloat")]
@@ -2605,7 +2601,6 @@ reflectConstant (AType (ATInt (ITVec IT32 c))) = reflCall "AType" [reflCall "ATI
reflectConstant (AType (ATInt (ITVec IT64 c))) = reflCall "AType" [reflCall "ATInt" [reflCall "ITVec" [Var (reflm "IT64"), RConstant (I c)]]]
reflectConstant PtrType = Var (reflm "PtrType")
reflectConstant ManagedPtrType = Var (reflm "ManagedPtrType")
reflectConstant BufferType = Var (reflm "BufferType")
reflectConstant VoidType = Var (reflm "VoidType")
reflectConstant Forgot = Var (reflm "Forgot")
reflectConstant WorldType = Var (reflm "WorldType")


@@ -113,10 +113,6 @@ constTy (B8 _) = "Bits8"
constTy (B16 _) = "Bits16"
constTy (B32 _) = "Bits32"
constTy (B64 _) = "Bits64"
constTy (B8V _) = "Bits8x16"
constTy (B16V _) = "Bits16x8"
constTy (B32V _) = "Bits32x4"
constTy (B64V _) = "Bits64x2"
constTy _ = "Type"
instance SExpable OutputAnnotation where


@@ -190,17 +190,9 @@ primitives =
-- Managed pointer registration
Prim (sUN "prim__registerPtr") (ty [PtrType, AType (ATInt ITNative)] ManagedPtrType) 2 (p_cantreduce)
(2, LRegisterPtr) total,
-- Buffers
Prim (sUN "prim__allocate") (ty [AType (ATInt (ITFixed IT64))] BufferType) 1 (p_allocate)
(1, LAllocate) total,
Prim (sUN "prim__appendBuffer") (ty [BufferType, AType (ATInt (ITFixed IT64)), AType (ATInt (ITFixed IT64)), AType (ATInt (ITFixed IT64)), AType (ATInt (ITFixed IT64)), BufferType] BufferType) 6 (p_appendBuffer)
(6, LAppendBuffer) partial,
Prim (sUN "prim__systemInfo") (ty [AType (ATInt ITNative)] StrType) 1 (p_cantreduce)
(1, LSystemInfo) total
] ++ concatMap intOps [ITFixed IT8, ITFixed IT16, ITFixed IT32, ITFixed IT64, ITBig, ITNative, ITChar]
++ concatMap vecOps vecTypes
++ concatMap fixedOps [ITFixed IT8, ITFixed IT16, ITFixed IT32, ITFixed IT64] -- ITNative, ITChar, ATFloat ] ++ vecTypes
++ vecBitcasts vecTypes
vecTypes :: [IntTy]
vecTypes = [ITVec IT8 16, ITVec IT16 8, ITVec IT32 4, ITVec IT64 2]
@@ -270,48 +262,11 @@ vecCmps ity =
, iCmp ity "gt" True (bCmp ity (>)) LGt total
]
-- The TODOs in this function are documented as Issue #1617 on the issue tracker.
--
-- https://github.com/idris-lang/Idris-dev/issues/1617
vecOps :: IntTy -> [Prim]
vecOps ity@(ITVec elem count) =
[ Prim (sUN $ "prim__mk" ++ intTyName ity)
(ty (replicate count . AType . ATInt . ITFixed $ elem) (AType . ATInt $ ity))
count (mkVecCon elem count) (count, LMkVec elem count) total
, Prim (sUN $ "prim__index" ++ intTyName ity)
(ty [AType . ATInt $ ity, AType (ATInt (ITFixed IT32))] (AType . ATInt . ITFixed $ elem))
2 (mkVecIndex count) (2, LIdxVec elem count) partial -- TODO: Ensure this reduces
, Prim (sUN $ "prim__update" ++ intTyName ity)
(ty [AType . ATInt $ ity, AType (ATInt (ITFixed IT32)), AType . ATInt . ITFixed $ elem]
(AType . ATInt $ ity))
3 (mkVecUpdate elem count) (3, LUpdateVec elem count) partial -- TODO: Ensure this reduces
] ++ intArith ity ++ vecCmps ity
bitcastPrim :: ArithTy -> ArithTy -> (ArithTy -> [Const] -> Maybe Const) -> PrimFn -> Prim
bitcastPrim from to impl prim =
Prim (sUN $ "prim__bitcast" ++ aTyName from ++ "_" ++ aTyName to) (ty [AType from] (AType to)) 1 (impl to)
(1, prim) total
vecBitcasts :: [IntTy] -> [Prim]
vecBitcasts tys = [bitcastPrim from to bitcastVec (LBitCast from to)
| from <- map ATInt vecTypes, to <- map ATInt vecTypes, from /= to]
fixedOps :: IntTy -> [Prim]
fixedOps ity@(ITFixed _) =
map appendFun endiannesses ++
map peekFun endiannesses
where
endiannesses = [ Native, LE, BE ]
tyName = intTyName ity
b64 = AType (ATInt (ITFixed IT64))
thisTy = AType $ ATInt ity
appendFun en = Prim (sUN $ "prim__append" ++ tyName ++ show en)
(ty [BufferType, b64, b64, thisTy] BufferType)
4 (p_append en) (4, LAppend ity en) partial
peekFun en = Prim (sUN $ "prim__peek" ++ tyName ++ show en)
(ty [BufferType, b64] thisTy)
2 (p_peek ity en) (2, LPeek ity en) partial
mapHalf :: (V.Unbox a, V.Unbox b) => ((a, a) -> b) -> Vector a -> Vector b
mapHalf f xs = V.generate (V.length xs `div` 2) (\i -> f (xs V.! (i*2), xs V.! (i*2+1)))
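-- mapHalf combines adjacent pairs of elements, halving a vector's length
-- (e.g. sixteen Word8 lanes pair up into eight Word16 lanes); its
-- counterpart mapDouble (not shown in this hunk) presumably splits each
-- element in two, doubling the length. bitcastVec below is built from
-- compositions of these two.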
@@ -339,76 +294,6 @@ truncWord64 :: Bool -> Word64 -> Word32
truncWord64 True x = fromIntegral (x `shiftR` 32)
truncWord64 False x = fromIntegral x
bitcastVec :: ArithTy -> [Const] -> Maybe Const
bitcastVec (ATInt (ITVec IT8 n)) [x@(B8V v)]
| V.length v == n = Just x
bitcastVec (ATInt (ITVec IT16 n)) [B8V v]
| V.length v == n*2 = Just . B16V . mapHalf concatWord8 $ v
bitcastVec (ATInt (ITVec IT32 n)) [B8V v]
| V.length v == n*4 = Just . B32V . mapHalf concatWord16 . mapHalf concatWord8 $ v
bitcastVec (ATInt (ITVec IT64 n)) [B8V v]
| V.length v == n*8 = Just . B64V . mapHalf concatWord32 . mapHalf concatWord16 . mapHalf concatWord8 $ v
bitcastVec (ATInt (ITVec IT8 n)) [B16V v]
| V.length v * 2 == n = Just . B8V . mapDouble truncWord16 $ v
bitcastVec (ATInt (ITVec IT16 n)) [x@(B16V v)]
| V.length v == n = Just x
bitcastVec (ATInt (ITVec IT32 n)) [B16V v]
| V.length v == n*2 = Just . B32V . mapHalf concatWord16 $ v
bitcastVec (ATInt (ITVec IT64 n)) [B16V v]
| V.length v == n*4 = Just . B64V . mapHalf concatWord32 . mapHalf concatWord16 $ v
bitcastVec (ATInt (ITVec IT8 n)) [B32V v]
| V.length v * 4 == n = Just . B8V . mapDouble truncWord16 . mapDouble truncWord32 $ v
bitcastVec (ATInt (ITVec IT16 n)) [B32V v]
| V.length v * 2 == n = Just . B16V . mapDouble truncWord32 $ v
bitcastVec (ATInt (ITVec IT32 n)) [x@(B32V v)]
| V.length v == n = Just x
bitcastVec (ATInt (ITVec IT64 n)) [B32V v]
| V.length v == n*2 = Just . B64V . mapHalf concatWord32 $ v
bitcastVec (ATInt (ITVec IT8 n)) [B64V v]
| V.length v * 8 == n = Just . B8V . mapDouble truncWord16 . mapDouble truncWord32 . mapDouble truncWord64 $ v
bitcastVec (ATInt (ITVec IT16 n)) [B64V v]
| V.length v * 4 == n = Just . B16V . mapDouble truncWord32 . mapDouble truncWord64 $ v
bitcastVec (ATInt (ITVec IT32 n)) [B64V v]
| V.length v == n*2 = Just . B32V . mapDouble truncWord64 $ v
bitcastVec (ATInt (ITVec IT64 n)) [x@(B64V v)]
| V.length v == n = Just x
bitcastVec _ _ = Nothing
mkVecCon :: NativeTy -> Int -> [Const] -> Maybe Const
mkVecCon ity count args
| length ints == count = Just (mkVec ity count ints)
| otherwise = Nothing
where
ints = getInt args
mkVec :: NativeTy -> Int -> [Integer] -> Const
mkVec IT8 len values = B8V $ V.generate len (fromInteger . (values !!))
mkVec IT16 len values = B16V $ V.generate len (fromInteger . (values !!))
mkVec IT32 len values = B32V $ V.generate len (fromInteger . (values !!))
mkVec IT64 len values = B64V $ V.generate len (fromInteger . (values !!))
mkVecIndex count [vec, B32 i] = Just (idxVec vec)
where
idxVec :: Const -> Const
idxVec (B8V v) = B8 $ V.unsafeIndex v (fromIntegral i)
idxVec (B16V v) = B16 $ V.unsafeIndex v (fromIntegral i)
idxVec (B32V v) = B32 $ V.unsafeIndex v (fromIntegral i)
idxVec (B64V v) = B64 $ V.unsafeIndex v (fromIntegral i)
mkVecIndex _ _ = Nothing
mkVecUpdate ity count [vec, B32 i, newElem] = updateVec vec newElem
where
updateVec :: Const -> Const -> Maybe Const
updateVec (B8V v) (B8 e) = Just . B8V $ V.unsafeUpdate v (V.singleton (fromIntegral i, e))
updateVec (B16V v) (B16 e) = Just . B16V $ V.unsafeUpdate v (V.singleton (fromIntegral i, e))
updateVec (B32V v) (B32 e) = Just . B32V $ V.unsafeUpdate v (V.singleton (fromIntegral i, e))
updateVec (B64V v) (B64 e) = Just . B64V $ V.unsafeUpdate v (V.singleton (fromIntegral i, e))
updateVec _ _ = Nothing
mkVecUpdate _ _ _ = Nothing
aTyName :: ArithTy -> String
aTyName (ATInt t) = intTyName t
aTyName ATFloat = "Float"
@@ -470,18 +355,6 @@ bsrem (ITFixed IT64) [B64 x, B64 y]
= Just $ B64 (fromIntegral (fromIntegral x `rem` fromIntegral y :: Int64))
bsrem ITNative [I x, I y] = Just $ I (x `rem` y)
bsrem ITChar [Ch x, Ch y] = Just $ Ch (chr $ (ord x) `rem` (ord y))
bsrem (ITVec IT8 _) [B8V x, B8V y]
= Just . B8V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `rem` fromIntegral d :: Int8))) x y
bsrem (ITVec IT16 _) [B16V x, B16V y]
= Just . B16V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `rem` fromIntegral d :: Int16))) x y
bsrem (ITVec IT32 _) [B32V x, B32V y]
= Just . B32V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `rem` fromIntegral d :: Int32))) x y
bsrem (ITVec IT64 _) [B64V x, B64V y]
= Just . B64V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `rem` fromIntegral d :: Int64))) x y
bsrem _ _ = Nothing
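-- The signed variants above work by reinterpreting each unsigned word as
-- the same-width signed type via fromIntegral, applying the signed `rem`
-- there, and truncating back. The same pattern recurs in bsdiv and bashr
-- below.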
bsdiv :: IntTy -> [Const] -> Maybe Const
@@ -496,18 +369,6 @@ bsdiv (ITFixed IT64) [B64 x, B64 y]
= Just $ B64 (fromIntegral (fromIntegral x `div` fromIntegral y :: Int64))
bsdiv ITNative [I x, I y] = Just $ I (x `div` y)
bsdiv ITChar [Ch x, Ch y] = Just $ Ch (chr $ (ord x) `div` (ord y))
bsdiv (ITVec IT8 _) [B8V x, B8V y]
= Just . B8V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `div` fromIntegral d :: Int8))) x y
bsdiv (ITVec IT16 _) [B16V x, B16V y]
= Just . B16V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `div` fromIntegral d :: Int16))) x y
bsdiv (ITVec IT32 _) [B32V x, B32V y]
= Just . B32V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `div` fromIntegral d :: Int32))) x y
bsdiv (ITVec IT64 _) [B64V x, B64V y]
= Just . B64V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `div` fromIntegral d :: Int64))) x y
bsdiv _ _ = Nothing
bashr :: IntTy -> [Const] -> Maybe Const
@@ -522,18 +383,6 @@ bashr (ITFixed IT64) [B64 x, B64 y]
= Just $ B64 (fromIntegral (fromIntegral x `shiftR` fromIntegral y :: Int64))
bashr ITNative [I x, I y] = Just $ I (x `shiftR` y)
bashr ITChar [Ch x, Ch y] = Just $ Ch (chr $ (ord x) `shiftR` (ord y))
bashr (ITVec IT8 _) [B8V x, B8V y]
= Just . B8V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `shiftR` fromIntegral d :: Int8))) x y
bashr (ITVec IT16 _) [B16V x, B16V y]
= Just . B16V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `shiftR` fromIntegral d :: Int16))) x y
bashr (ITVec IT32 _) [B32V x, B32V y]
= Just . B32V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `shiftR` fromIntegral d :: Int32))) x y
bashr (ITVec IT64 _) [B64V x, B64V y]
= Just . B64V $
V.zipWith (\n d -> (fromIntegral (fromIntegral n `shiftR` fromIntegral d :: Int64))) x y
bashr _ _ = Nothing
bUn :: IntTy -> (forall a. Bits a => a -> a) -> [Const] -> Maybe Const
@@ -544,10 +393,6 @@ bUn (ITFixed IT64) op [B64 x] = Just $ B64 (op x)
bUn ITBig op [BI x] = Just $ BI (op x)
bUn ITNative op [I x] = Just $ I (op x)
bUn ITChar op [Ch x] = Just $ Ch (chr $ op (ord x))
bUn (ITVec IT8 _) op [B8V x] = Just . B8V $ V.map op x
bUn (ITVec IT16 _) op [B16V x] = Just . B16V $ V.map op x
bUn (ITVec IT32 _) op [B32V x] = Just . B32V $ V.map op x
bUn (ITVec IT64 _) op [B64V x] = Just . B64V $ V.map op x
bUn _ _ _ = Nothing
bitBin :: IntTy -> (forall a. (Bits a, Integral a) => a -> a -> a) -> [Const] -> Maybe Const
@@ -558,10 +403,6 @@ bitBin (ITFixed IT64) op [B64 x, B64 y] = Just $ B64 (op x y)
bitBin ITBig op [BI x, BI y] = Just $ BI (op x y)
bitBin ITNative op [I x, I y] = Just $ I (op x y)
bitBin ITChar op [Ch x, Ch y] = Just $ Ch (chr $ op (ord x) (ord y))
bitBin (ITVec IT8 _) op [B8V x, B8V y] = Just . B8V $ V.zipWith op x y
bitBin (ITVec IT16 _) op [B16V x, B16V y] = Just . B16V $ V.zipWith op x y
bitBin (ITVec IT32 _) op [B32V x, B32V y] = Just . B32V $ V.zipWith op x y
bitBin (ITVec IT64 _) op [B64V x, B64V y] = Just . B64V $ V.zipWith op x y
bitBin _ _ _ = Nothing
bCmp :: IntTy -> (forall a. (Integral a, Ord a) => a -> a -> Bool) -> [Const] -> Maybe Const
@@ -572,14 +413,6 @@ bCmp (ITFixed IT64) op [B64 x, B64 y] = Just $ I (if (op x y) then 1 else 0)
bCmp ITBig op [BI x, BI y] = Just $ I (if (op x y) then 1 else 0)
bCmp ITNative op [I x, I y] = Just $ I (if (op x y) then 1 else 0)
bCmp ITChar op [Ch x, Ch y] = Just $ I (if (op (ord x) (ord y)) then 1 else 0)
bCmp (ITVec IT8 _) op [B8V x, B8V y]
= Just . B8V . V.map (\b -> if b then -1 else 0) $ V.zipWith op x y
bCmp (ITVec IT16 _) op [B16V x, B16V y]
= Just . B16V . V.map (\b -> if b then -1 else 0) $ V.zipWith op x y
bCmp (ITVec IT32 _) op [B32V x, B32V y]
= Just . B32V . V.map (\b -> if b then -1 else 0) $ V.zipWith op x y
bCmp (ITVec IT64 _) op [B64V x, B64V y]
= Just . B64V . V.map (\b -> if b then -1 else 0) $ V.zipWith op x y
bCmp _ _ _ = Nothing
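-- Note that the vector comparisons yield -1 (all bits set) or 0 per lane
-- rather than 1 or 0, matching the lane-mask convention of SSE compare
-- instructions.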
@@ -717,53 +550,5 @@ p_strCons _ = Nothing
p_strRev [Str xs] = Just $ Str (reverse xs)
p_strRev _ = Nothing
p_allocate, p_appendBuffer :: [Const] -> Maybe Const
p_allocate [B64 _] = Just $ B8V V.empty
p_allocate _ = Nothing
p_appendBuffer [B8V vect1, B64 size1, B64 count, B64 size2, B64 off, B8V vect2] =
Just $ B8V (foldl (V.++)
(V.slice 0 (fromIntegral size1) vect1)
(replicate (fromIntegral count) (V.slice (fromIntegral off) (fromIntegral size2) vect2)))
p_appendBuffer _ = Nothing
p_append :: Endianness -> [Const] -> Maybe Const
p_append en [B8V vect, B64 size, B64 count, val] =
let bytes = case val of
B8 x -> [x]
B16 x -> [fromIntegral x, fromIntegral (shiftR x 8)]
B32 x -> map (fromIntegral . shiftR x) [0,8..24]
B64 x -> map (fromIntegral . shiftR x) [0,8..56]
in Just $ B8V (foldl V.snoc
(V.slice 0 (fromIntegral size) vect)
(concat (replicate (fromIntegral count)
(case en of
BE -> reverse bytes
_ -> bytes))))
p_append _ _ = Nothing
p_peek :: IntTy -> Endianness -> [Const] -> Maybe Const
p_peek (ITFixed IT8) _ [B8V vect, B64 offset] = Just $ B8 (vect V.! fromIntegral offset)
p_peek (ITFixed IT16) en [B8V vect, B64 offset] =
let bytes = [0..1]
in Just $ B16 (foldl (\full byte -> shiftL full 8 .|. fromIntegral (vect V.! (fromIntegral offset+byte)))
0
(case en of
BE -> bytes
_ -> reverse bytes))
p_peek (ITFixed IT32) en [B8V vect, B64 offset] =
let bytes = [0..3]
in Just $ B32 (foldl (\full byte -> shiftL full 8 .|. fromIntegral (vect V.! (fromIntegral offset+byte)))
0
(case en of
BE -> bytes
_ -> reverse bytes))
p_peek (ITFixed IT64) en [B8V vect, B64 offset] =
let bytes = [0..7]
in Just $ B64 (foldl (\full byte -> shiftL full 8 .|. fromIntegral (vect V.! (fromIntegral offset+byte)))
0
(case en of
BE -> bytes
_ -> reverse bytes))
p_peek _ _ _ = Nothing
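-- The fold assembles the result most-significant-byte first (each step
-- shifts the accumulator left by 8 before OR-ing in the next byte), so BE
-- walks the byte offsets in order and LE reverses them first.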
p_cantreduce :: a -> Maybe b
p_cantreduce _ = Nothing