/*
 * Filter: read 16-bit big-endian signed linear PCM samples on stdin,
 * write 8-bit G.711 mu-law samples on stdout.
 */
#include <stdio.h>

/*
 * mu-law packs a sample into eight bits as sign(1), exponent(3),
 * mantissa(4), and then bit-complements the whole byte (per G.711, so
 * that silence does not appear on the wire as long runs of zero bits).
 */
static unsigned char linear_to_ulaw(signed long int linear)
{
	unsigned char sign;
	unsigned char exp;
	unsigned char mant;
	/*
	 * Segment (exponent) lookup, indexed by the top 7 magnitude bits
	 * of the biased sample: segment k covers magnitudes
	 * [2^(k+7), 2^(k+8)), i.e. this is floor(log2(linear>>8)) + 1,
	 * with 0 for inputs below 256.
	 */
	static const unsigned char exptbl[128] = {
		0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,
		5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
		6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
		6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
		7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
		7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
		7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
		7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
	};

	if (linear < 0) {
		linear = -linear;
		sign = 0x80;
	} else {
		sign = 0;
	}
	/*
	 * 0x84 (132) is the standard G.711 encoding bias: adding it
	 * guarantees a set bit just above the mantissa field in every
	 * segment, so exponent and mantissa fall out of plain shifts.
	 */
	linear += 0x84;
	if (linear > 32767)
		linear = 32767;	/* clamp after biasing to stay in range */
	exp = exptbl[linear >> 8];
	mant = (linear >> (exp + 3)) & 0xf;
	return (unsigned char)~(sign | (exp << 4) | mant);
}

int main(void)
{
	int hi;
	int lo;
	long sample;

	/*
	 * Consume big-endian 16-bit sample pairs; stop cleanly at EOF,
	 * including an EOF that lands after a lone trailing byte.
	 */
	while ((hi = getchar()) != EOF && (lo = getchar()) != EOF) {
		sample = (long)((hi << 8) | lo);
		if (sample > 32767)
			sample -= 65536;	/* portable 16-bit sign extension */
		putchar(linear_to_ulaw(sample));
	}
	return 0;
}