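/**
 * @file
 * ADPCM encoders and decoders.
 */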
00021 #include "avcodec.h"
00022 #include "get_bits.h"
00023 #include "put_bits.h"
00024 #include "bytestream.h"
00025
00057 #define BLKSIZE 1024
00058
/* step_table[] and index_table[] are from the ADPCM reference source */
/* This is the index table: */
static const int index_table[16] = {
00062 -1, -1, -1, -1, 2, 4, 6, 8,
00063 -1, -1, -1, -1, 2, 4, 6, 8,
00064 };

/**
 * This is the step table. Note that many programs use slight deviations from
 * this table, but such deviations are negligible:
 */
static const int step_table[89] = {
00071 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
00072 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
00073 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
00074 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
00075 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
00076 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
00077 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
00078 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
00079 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
00080 };

/* These are for MS-ADPCM */
/* AdaptationTable[], AdaptCoeff1[] and AdaptCoeff2[] are from libsndfile */
static const int AdaptationTable[] = {
00085 230, 230, 230, 230, 307, 409, 512, 614,
00086 768, 614, 512, 409, 307, 230, 230, 230
00087 };

/** Divided by 4 to fit in 8-bit integers */
static const uint8_t AdaptCoeff1[] = {
00091 64, 128, 0, 48, 60, 115, 98
00092 };

/** Divided by 4 to fit in 8-bit integers */
static const int8_t AdaptCoeff2[] = {
00096 0, -64, 0, 16, 0, -52, -58
00097 };
00098
/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
00101 { 0, 0 },
00102 { 60, 0 },
00103 { 115, -52 },
00104 { 98, -55 },
00105 { 122, -60 }
00106 };

/* Filter coefficients for the EA ADPCM decoders: coeff1 at index n, coeff2 at index n+4 */
static const int ea_adpcm_table[] = {
00109 0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
00110 3, 4, 7, 8, 10, 11, 0, -1, -3, -4
00111 };

/* Step-index adjustment tables for SWF ADPCM, one per code size (2 to 5 bits) */
static const int swf_index_tables[4][16] = {
00115 { -1, 2 },
00116 { -1, -1, 2, 4 },
00117 { -1, -1, -1, -1, 2, 4, 6, 8 },
00118 { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
00119 };

/* Step scaling table for Yamaha ADPCM */
static const int yamaha_indexscale[] = {
00122 230, 230, 230, 230, 307, 409, 512, 614,
00123 230, 230, 230, 230, 307, 409, 512, 614
00124 };

/* Signed difference magnitudes (2*n+1 pattern) for Yamaha ADPCM */
static const int yamaha_difflookup[] = {
00127 1, 3, 5, 7, 9, 11, 13, 15,
00128 -1, -3, -5, -7, -9, -11, -13, -15
00129 };
00130
00131
00132
00133 typedef struct ADPCMChannelStatus {
00134 int predictor;
00135 short int step_index;
00136 int step;
/* for encoding */
00138 int prev_sample;

/* MS version */
00141 short sample1;
00142 short sample2;
00143 int coeff1;
00144 int coeff2;
00145 int idelta;
00146 } ADPCMChannelStatus;
00147
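/* State for the optional trellis (Viterbi-style) quantizer search used by the
 * encoders when avctx->trellis is set. */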
00148 typedef struct TrellisPath {
00149 int nibble;
00150 int prev;
00151 } TrellisPath;
00152
00153 typedef struct TrellisNode {
00154 uint32_t ssd;
00155 int path;
00156 int sample1;
00157 int sample2;
00158 int step;
00159 } TrellisNode;
00160
00161 typedef struct ADPCMContext {
00162 ADPCMChannelStatus status[6];
00163 TrellisPath *paths;
00164 TrellisNode *node_buf;
00165 TrellisNode **nodep_buf;
00166 uint8_t *trellis_hash;
00167 } ADPCMContext;
00168
00169 #define FREEZE_INTERVAL 128
00170
00171
00172
00173 #if CONFIG_ENCODERS
00174 static av_cold int adpcm_encode_init(AVCodecContext *avctx)
00175 {
00176 ADPCMContext *s = avctx->priv_data;
00177 uint8_t *extradata;
00178 int i;
00179 if (avctx->channels > 2)
00180 return -1;
00181
00182 if(avctx->trellis && (unsigned)avctx->trellis > 16U){
00183 av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
00184 return -1;
00185 }
00186
00187 if (avctx->trellis) {
00188 int frontier = 1 << avctx->trellis;
00189 int max_paths = frontier * FREEZE_INTERVAL;
00190 FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error);
00191 FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error);
00192 FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error);
00193 FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error);
00194 }
00195
00196 switch(avctx->codec->id) {
00197 case CODEC_ID_ADPCM_IMA_WAV:
/* each 16-bit sample gives one nibble,
 * and the block starts with 4 bytes of header per channel */
avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1;
avctx->block_align = BLKSIZE;
00202 break;
00203 case CODEC_ID_ADPCM_IMA_QT:
00204 avctx->frame_size = 64;
00205 avctx->block_align = 34 * avctx->channels;
00206 break;
00207 case CODEC_ID_ADPCM_MS:
/* each 16-bit sample gives one nibble,
 * and the block starts with 7 bytes of header per channel */
avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
00210 avctx->block_align = BLKSIZE;
00211 avctx->extradata_size = 32;
00212 extradata = avctx->extradata = av_malloc(avctx->extradata_size);
00213 if (!extradata)
00214 return AVERROR(ENOMEM);
00215 bytestream_put_le16(&extradata, avctx->frame_size);
00216 bytestream_put_le16(&extradata, 7);
00217 for (i = 0; i < 7; i++) {
00218 bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4);
00219 bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4);
00220 }
00221 break;
00222 case CODEC_ID_ADPCM_YAMAHA:
00223 avctx->frame_size = BLKSIZE * avctx->channels;
00224 avctx->block_align = BLKSIZE;
00225 break;
00226 case CODEC_ID_ADPCM_SWF:
00227 if (avctx->sample_rate != 11025 &&
00228 avctx->sample_rate != 22050 &&
00229 avctx->sample_rate != 44100) {
00230 av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
00231 goto error;
00232 }
00233 avctx->frame_size = 512 * (avctx->sample_rate / 11025);
00234 break;
00235 default:
00236 goto error;
00237 }
00238
00239 avctx->coded_frame= avcodec_alloc_frame();
00240 avctx->coded_frame->key_frame= 1;
00241
00242 return 0;
00243 error:
00244 av_freep(&s->paths);
00245 av_freep(&s->node_buf);
00246 av_freep(&s->nodep_buf);
00247 av_freep(&s->trellis_hash);
00248 return -1;
00249 }
00250
00251 static av_cold int adpcm_encode_close(AVCodecContext *avctx)
00252 {
00253 ADPCMContext *s = avctx->priv_data;
00254 av_freep(&avctx->coded_frame);
00255 av_freep(&s->paths);
00256 av_freep(&s->node_buf);
00257 av_freep(&s->nodep_buf);
00258 av_freep(&s->trellis_hash);
00259
00260 return 0;
00261 }
00262
00263
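/* Quantize one sample to an IMA ADPCM nibble (fast approximation: picks the
 * magnitude directly instead of bit by bit) and update the channel state. */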
00264 static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
00265 {
00266 int delta = sample - c->prev_sample;
00267 int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
00268 c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
00269 c->prev_sample = av_clip_int16(c->prev_sample);
00270 c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
00271 return nibble;
00272 }
00273
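/* Quantize one sample to an IMA QT nibble using the exact bit-by-bit
 * successive approximation of the reference coder, then update the state. */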
00274 static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample)
00275 {
00276 int delta = sample - c->prev_sample;
00277 int diff, step = step_table[c->step_index];
00278 int nibble = 8*(delta < 0);
00279
00280 delta= abs(delta);
00281 diff = delta + (step >> 3);
00282
00283 if (delta >= step) {
00284 nibble |= 4;
00285 delta -= step;
00286 }
00287 step >>= 1;
00288 if (delta >= step) {
00289 nibble |= 2;
00290 delta -= step;
00291 }
00292 step >>= 1;
00293 if (delta >= step) {
00294 nibble |= 1;
00295 delta -= step;
00296 }
00297 diff -= delta;
00298
00299 if (nibble & 8)
00300 c->prev_sample -= diff;
00301 else
00302 c->prev_sample += diff;
00303
00304 c->prev_sample = av_clip_int16(c->prev_sample);
00305 c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
00306
00307 return nibble;
00308 }
00309
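/* Quantize one sample to a Microsoft ADPCM nibble: predict from the two
 * previous samples, code the scaled residual, and adapt idelta. */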
00310 static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
00311 {
00312 int predictor, nibble, bias;
00313
00314 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
00315
00316 nibble= sample - predictor;
00317 if(nibble>=0) bias= c->idelta/2;
00318 else bias=-c->idelta/2;
00319
00320 nibble= (nibble + bias) / c->idelta;
00321 nibble= av_clip(nibble, -8, 7)&0x0F;
00322
00323 predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
00324
00325 c->sample2 = c->sample1;
00326 c->sample1 = av_clip_int16(predictor);
00327
00328 c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
00329 if (c->idelta < 16) c->idelta = 16;
00330
00331 return nibble;
00332 }
00333
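/* Quantize one sample to a Yamaha ADPCM nibble and adapt the step size. */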
00334 static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
00335 {
00336 int nibble, delta;
00337
00338 if(!c->step) {
00339 c->predictor = 0;
00340 c->step = 127;
00341 }
00342
00343 delta = sample - c->predictor;
00344
00345 nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
00346
00347 c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
00348 c->predictor = av_clip_int16(c->predictor);
00349 c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
00350 c->step = av_clip(c->step, 127, 24567);
00351
00352 return nibble;
00353 }
00354
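/* Rate-distortion nibble selection: keeps a frontier of 2^avctx->trellis
 * candidate decoder states per sample and freezes the best path into dst
 * every FREEZE_INTERVAL samples. Used by the IMA, MS, SWF and Yamaha encoders. */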
00355 static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
00356 uint8_t *dst, ADPCMChannelStatus *c, int n)
00357 {
00358
00359 ADPCMContext *s = avctx->priv_data;
00360 const int frontier = 1 << avctx->trellis;
00361 const int stride = avctx->channels;
00362 const int version = avctx->codec->id;
00363 TrellisPath *paths = s->paths, *p;
00364 TrellisNode *node_buf = s->node_buf;
00365 TrellisNode **nodep_buf = s->nodep_buf;
00366 TrellisNode **nodes = nodep_buf;
00367 TrellisNode **nodes_next = nodep_buf + frontier;
00368 int pathn = 0, froze = -1, i, j, k, generation = 0;
00369 uint8_t *hash = s->trellis_hash;
00370 memset(hash, 0xff, 65536 * sizeof(*hash));
00371
00372 memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
00373 nodes[0] = node_buf + frontier;
00374 nodes[0]->ssd = 0;
00375 nodes[0]->path = 0;
00376 nodes[0]->step = c->step_index;
00377 nodes[0]->sample1 = c->sample1;
00378 nodes[0]->sample2 = c->sample2;
00379 if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
00380 nodes[0]->sample1 = c->prev_sample;
00381 if(version == CODEC_ID_ADPCM_MS)
00382 nodes[0]->step = c->idelta;
00383 if(version == CODEC_ID_ADPCM_YAMAHA) {
00384 if(c->step == 0) {
00385 nodes[0]->step = 127;
00386 nodes[0]->sample1 = 0;
00387 } else {
00388 nodes[0]->step = c->step;
00389 nodes[0]->sample1 = c->predictor;
00390 }
00391 }
00392
00393 for(i=0; i<n; i++) {
00394 TrellisNode *t = node_buf + frontier*(i&1);
00395 TrellisNode **u;
00396 int sample = samples[i*stride];
00397 int heap_pos = 0;
00398 memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
00399 for(j=0; j<frontier && nodes[j]; j++) {
// higher j means higher ssd; for the lower-quality nodes, try a narrower range
const int range = (j < frontier/2) ? 1 : 0;
00402 const int step = nodes[j]->step;
00403 int nidx;
00404 if(version == CODEC_ID_ADPCM_MS) {
00405 const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
00406 const int div = (sample - predictor) / step;
00407 const int nmin = av_clip(div-range, -8, 6);
00408 const int nmax = av_clip(div+range, -7, 7);
00409 for(nidx=nmin; nidx<=nmax; nidx++) {
00410 const int nibble = nidx & 0xf;
00411 int dec_sample = predictor + nidx * step;
00412 #define STORE_NODE(NAME, STEP_INDEX)\
00413 int d;\
00414 uint32_t ssd;\
00415 int pos;\
00416 TrellisNode *u;\
00417 uint8_t *h;\
00418 dec_sample = av_clip_int16(dec_sample);\
00419 d = sample - dec_sample;\
00420 ssd = nodes[j]->ssd + d*d;\
/* Check for wraparound of the unsigned ssd accumulator and skip such \
 * samples; a 64-bit ssd would avoid this check but is slightly \
 * slower on x86. */\
if (ssd < nodes[j]->ssd)\
goto next_##NAME;\
/* Collapse any two states with the same previous sample value: nodes of \
 * the previous generation come out of a heap, so an earlier node with \
 * the same reconstructed sample is almost always the better one. */\
00438 h = &hash[(uint16_t) dec_sample];\
00439 if (*h == generation)\
00440 goto next_##NAME;\
00441 if (heap_pos < frontier) {\
00442 pos = heap_pos++;\
00443 } else {\
/* Try to replace one of the leaf nodes with the new sample, \
 * but try a different slot each time. */\
00446 pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\
00447 if (ssd > nodes_next[pos]->ssd)\
00448 goto next_##NAME;\
00449 heap_pos++;\
00450 }\
00451 *h = generation;\
00452 u = nodes_next[pos];\
00453 if(!u) {\
00454 assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\
00455 u = t++;\
00456 nodes_next[pos] = u;\
00457 u->path = pathn++;\
00458 }\
00459 u->ssd = ssd;\
00460 u->step = STEP_INDEX;\
00461 u->sample2 = nodes[j]->sample1;\
00462 u->sample1 = dec_sample;\
00463 paths[u->path].nibble = nibble;\
00464 paths[u->path].prev = nodes[j]->path;\
/* Sift the newly inserted node up the heap to restore the heap property. */\
00467 while (pos > 0) {\
00468 int parent = (pos - 1) >> 1;\
00469 if (nodes_next[parent]->ssd <= ssd)\
00470 break;\
00471 FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
00472 pos = parent;\
00473 }\
00474 next_##NAME:;
00475 STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
00476 }
00477 } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
00478 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
00479 const int predictor = nodes[j]->sample1;\
00480 const int div = (sample - predictor) * 4 / STEP_TABLE;\
00481 int nmin = av_clip(div-range, -7, 6);\
00482 int nmax = av_clip(div+range, -6, 7);\
00483 if(nmin<=0) nmin--; \
00484 if(nmax<0) nmax--;\
00485 for(nidx=nmin; nidx<=nmax; nidx++) {\
00486 const int nibble = nidx<0 ? 7-nidx : nidx;\
00487 int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
00488 STORE_NODE(NAME, STEP_INDEX);\
00489 }
00490 LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
00491 } else {
00492 LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
00493 #undef LOOP_NODES
00494 #undef STORE_NODE
00495 }
00496 }
00497
00498 u = nodes;
00499 nodes = nodes_next;
00500 nodes_next = u;
00501
00502 generation++;
00503 if (generation == 255) {
00504 memset(hash, 0xff, 65536 * sizeof(*hash));
00505 generation = 0;
00506 }

// prevent overflow of the accumulated ssd values
00509 if(nodes[0]->ssd > (1<<28)) {
00510 for(j=1; j<frontier && nodes[j]; j++)
00511 nodes[j]->ssd -= nodes[0]->ssd;
00512 nodes[0]->ssd = 0;
00513 }

// merge old paths to save memory
00516 if(i == froze + FREEZE_INTERVAL) {
00517 p = &paths[nodes[0]->path];
00518 for(k=i; k>froze; k--) {
00519 dst[k] = p->nibble;
00520 p = &paths[p->prev];
00521 }
00522 froze = i;
00523 pathn = 0;
// other nodes might use paths that don't coincide with the frozen one.
// checking which nodes do so is too slow, so just kill them all.
// this also slightly improves quality, but I don't know exactly why.
00527 memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
00528 }
00529 }
00530
00531 p = &paths[nodes[0]->path];
00532 for(i=n-1; i>froze; i--) {
00533 dst[i] = p->nibble;
00534 p = &paths[p->prev];
00535 }
00536
00537 c->predictor = nodes[0]->sample1;
00538 c->sample1 = nodes[0]->sample1;
00539 c->sample2 = nodes[0]->sample2;
00540 c->step_index = nodes[0]->step;
00541 c->step = nodes[0]->step;
00542 c->idelta = nodes[0]->step;
00543 }
00544
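/* Encode one frame of 16-bit samples into the codec-specific ADPCM layout. */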
00545 static int adpcm_encode_frame(AVCodecContext *avctx,
00546 unsigned char *frame, int buf_size, void *data)
00547 {
00548 int n, i, st;
00549 short *samples;
00550 unsigned char *dst;
00551 ADPCMContext *c = avctx->priv_data;
00552 uint8_t *buf;
00553
00554 dst = frame;
00555 samples = (short *)data;
00556 st= avctx->channels == 2;
00557
00558
00559 switch(avctx->codec->id) {
00560 case CODEC_ID_ADPCM_IMA_WAV:
00561 n = avctx->frame_size / 8;
00562 c->status[0].prev_sample = (signed short)samples[0];
00563
00564 bytestream_put_le16(&dst, c->status[0].prev_sample);
00565 *dst++ = (unsigned char)c->status[0].step_index;
00566 *dst++ = 0;
00567 samples++;
00568 if (avctx->channels == 2) {
00569 c->status[1].prev_sample = (signed short)samples[0];
00570
00571 bytestream_put_le16(&dst, c->status[1].prev_sample);
00572 *dst++ = (unsigned char)c->status[1].step_index;
00573 *dst++ = 0;
00574 samples++;
00575 }
00576
00577
00578 if(avctx->trellis > 0) {
00579 FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error);
00580 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8);
00581 if(avctx->channels == 2)
00582 adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8);
00583 for(i=0; i<n; i++) {
00584 *dst++ = buf[8*i+0] | (buf[8*i+1] << 4);
00585 *dst++ = buf[8*i+2] | (buf[8*i+3] << 4);
00586 *dst++ = buf[8*i+4] | (buf[8*i+5] << 4);
00587 *dst++ = buf[8*i+6] | (buf[8*i+7] << 4);
00588 if (avctx->channels == 2) {
00589 uint8_t *buf1 = buf + n*8;
00590 *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4);
00591 *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4);
00592 *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4);
00593 *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4);
00594 }
00595 }
00596 av_free(buf);
00597 } else
00598 for (; n>0; n--) {
00599 *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
00600 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
00601 dst++;
00602 *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
00603 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
00604 dst++;
00605 *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
00606 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
00607 dst++;
00608 *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
00609 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
00610 dst++;
00611
00612 if (avctx->channels == 2) {
00613 *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
00614 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
00615 dst++;
00616 *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
00617 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
00618 dst++;
00619 *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
00620 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
00621 dst++;
00622 *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
00623 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
00624 dst++;
00625 }
00626 samples += 8 * avctx->channels;
00627 }
00628 break;
00629 case CODEC_ID_ADPCM_IMA_QT:
00630 {
00631 int ch, i;
00632 PutBitContext pb;
00633 init_put_bits(&pb, dst, buf_size*8);
00634
00635 for(ch=0; ch<avctx->channels; ch++){
00636 put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
00637 put_bits(&pb, 7, c->status[ch].step_index);
00638 if(avctx->trellis > 0) {
00639 uint8_t buf[64];
00640 adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
00641 for(i=0; i<64; i++)
00642 put_bits(&pb, 4, buf[i^1]);
00643 } else {
00644 for (i=0; i<64; i+=2){
00645 int t1, t2;
00646 t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
00647 t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
00648 put_bits(&pb, 4, t2);
00649 put_bits(&pb, 4, t1);
00650 }
00651 }
00652 }
00653
00654 flush_put_bits(&pb);
00655 dst += put_bits_count(&pb)>>3;
00656 break;
00657 }
00658 case CODEC_ID_ADPCM_SWF:
00659 {
00660 int i;
00661 PutBitContext pb;
00662 init_put_bits(&pb, dst, buf_size*8);
00663
00664 n = avctx->frame_size-1;
00665
// store AdpcmCodeSize
put_bits(&pb, 2, 2);                // set 4-bit flag

// init the encoder state
00670 for(i=0; i<avctx->channels; i++){
00671 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
00672 put_sbits(&pb, 16, samples[i]);
00673 put_bits(&pb, 6, c->status[i].step_index);
00674 c->status[i].prev_sample = (signed short)samples[i];
00675 }
00676
00677 if(avctx->trellis > 0) {
00678 FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
00679 adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n);
00680 if (avctx->channels == 2)
00681 adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n);
00682 for(i=0; i<n; i++) {
00683 put_bits(&pb, 4, buf[i]);
00684 if (avctx->channels == 2)
00685 put_bits(&pb, 4, buf[n+i]);
00686 }
00687 av_free(buf);
00688 } else {
00689 for (i=1; i<avctx->frame_size; i++) {
00690 put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
00691 if (avctx->channels == 2)
00692 put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
00693 }
00694 }
00695 flush_put_bits(&pb);
00696 dst += put_bits_count(&pb)>>3;
00697 break;
00698 }
00699 case CODEC_ID_ADPCM_MS:
00700 for(i=0; i<avctx->channels; i++){
00701 int predictor=0;
00702
00703 *dst++ = predictor;
00704 c->status[i].coeff1 = AdaptCoeff1[predictor];
00705 c->status[i].coeff2 = AdaptCoeff2[predictor];
00706 }
00707 for(i=0; i<avctx->channels; i++){
00708 if (c->status[i].idelta < 16)
00709 c->status[i].idelta = 16;
00710
00711 bytestream_put_le16(&dst, c->status[i].idelta);
00712 }
00713 for(i=0; i<avctx->channels; i++){
00714 c->status[i].sample2= *samples++;
00715 }
00716 for(i=0; i<avctx->channels; i++){
00717 c->status[i].sample1= *samples++;
00718
00719 bytestream_put_le16(&dst, c->status[i].sample1);
00720 }
00721 for(i=0; i<avctx->channels; i++)
00722 bytestream_put_le16(&dst, c->status[i].sample2);
00723
00724 if(avctx->trellis > 0) {
00725 int n = avctx->block_align - 7*avctx->channels;
00726 FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
00727 if(avctx->channels == 1) {
00728 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
00729 for(i=0; i<n; i+=2)
00730 *dst++ = (buf[i] << 4) | buf[i+1];
00731 } else {
00732 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
00733 adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
00734 for(i=0; i<n; i++)
00735 *dst++ = (buf[i] << 4) | buf[n+i];
00736 }
00737 av_free(buf);
00738 } else
00739 for(i=7*avctx->channels; i<avctx->block_align; i++) {
00740 int nibble;
00741 nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
00742 nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
00743 *dst++ = nibble;
00744 }
00745 break;
00746 case CODEC_ID_ADPCM_YAMAHA:
00747 n = avctx->frame_size / 2;
00748 if(avctx->trellis > 0) {
00749 FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error);
00750 n *= 2;
00751 if(avctx->channels == 1) {
00752 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
00753 for(i=0; i<n; i+=2)
00754 *dst++ = buf[i] | (buf[i+1] << 4);
00755 } else {
00756 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
00757 adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
00758 for(i=0; i<n; i++)
00759 *dst++ = buf[i] | (buf[n+i] << 4);
00760 }
00761 av_free(buf);
00762 } else
00763 for (n *= avctx->channels; n>0; n--) {
00764 int nibble;
00765 nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
00766 nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
00767 *dst++ = nibble;
00768 }
00769 break;
00770 default:
00771 error:
00772 return -1;
00773 }
00774 return dst - frame;
00775 }
00776 #endif //CONFIG_ENCODERS
00777
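/* Decoder init: validate the channel count and set per-codec initial state. */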
00778 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
00779 {
00780 ADPCMContext *c = avctx->priv_data;
00781 unsigned int max_channels = 2;
00782
00783 switch(avctx->codec->id) {
00784 case CODEC_ID_ADPCM_EA_R1:
00785 case CODEC_ID_ADPCM_EA_R2:
00786 case CODEC_ID_ADPCM_EA_R3:
00787 case CODEC_ID_ADPCM_EA_XAS:
00788 max_channels = 6;
00789 break;
00790 }
00791 if(avctx->channels > max_channels){
00792 return -1;
00793 }
00794
00795 switch(avctx->codec->id) {
00796 case CODEC_ID_ADPCM_CT:
00797 c->status[0].step = c->status[1].step = 511;
00798 break;
00799 case CODEC_ID_ADPCM_IMA_WAV:
00800 if (avctx->bits_per_coded_sample != 4) {
00801 av_log(avctx, AV_LOG_ERROR, "Only 4-bit ADPCM IMA WAV files are supported\n");
00802 return -1;
00803 }
00804 break;
00805 case CODEC_ID_ADPCM_IMA_WS:
00806 if (avctx->extradata && avctx->extradata_size == 2 * 4) {
00807 c->status[0].predictor = AV_RL32(avctx->extradata);
00808 c->status[1].predictor = AV_RL32(avctx->extradata + 4);
00809 }
00810 break;
00811 default:
00812 break;
00813 }
00814 avctx->sample_fmt = AV_SAMPLE_FMT_S16;
00815 return 0;
00816 }
00817
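/* Expand one IMA ADPCM nibble to a 16-bit sample; 'shift' selects the
 * precision of the (2*delta+1)*step product (3 for standard IMA). */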
00818 static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
00819 {
00820 int step_index;
00821 int predictor;
00822 int sign, delta, diff, step;
00823
00824 step = step_table[c->step_index];
00825 step_index = c->step_index + index_table[(unsigned)nibble];
00826 if (step_index < 0) step_index = 0;
00827 else if (step_index > 88) step_index = 88;
00828
00829 sign = nibble & 8;
00830 delta = nibble & 7;
00831
00832
00833
00834 diff = ((2 * delta + 1) * step) >> shift;
00835 predictor = c->predictor;
00836 if (sign) predictor -= diff;
00837 else predictor += diff;
00838
00839 c->predictor = av_clip_int16(predictor);
00840 c->step_index = step_index;
00841
00842 return (short)c->predictor;
00843 }
00844
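/* Expand one IMA QT nibble using the exact reference reconstruction
 * (step>>3 plus the selected step fractions). */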
00845 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
00846 {
00847 int step_index;
00848 int predictor;
00849 int diff, step;
00850
00851 step = step_table[c->step_index];
00852 step_index = c->step_index + index_table[nibble];
00853 step_index = av_clip(step_index, 0, 88);
00854
00855 diff = step >> 3;
00856 if (nibble & 4) diff += step;
00857 if (nibble & 2) diff += step >> 1;
00858 if (nibble & 1) diff += step >> 2;
00859
00860 if (nibble & 8)
00861 predictor = c->predictor - diff;
00862 else
00863 predictor = c->predictor + diff;
00864
00865 c->predictor = av_clip_int16(predictor);
00866 c->step_index = step_index;
00867
00868 return c->predictor;
00869 }
00870
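/* Expand one Microsoft ADPCM nibble: 2-tap prediction plus adaptive idelta. */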
00871 static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
00872 {
00873 int predictor;
00874
00875 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
00876 predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
00877
00878 c->sample2 = c->sample1;
00879 c->sample1 = av_clip_int16(predictor);
00880 c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
00881 if (c->idelta < 16) c->idelta = 16;
00882
00883 return c->sample1;
00884 }
00885
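/* Expand one Creative Technology (CT) ADPCM nibble; the predictor decays by
 * 254/256 each step and the step size adapts as in MS ADPCM. */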
00886 static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
00887 {
00888 int sign, delta, diff;
00889 int new_step;
00890
00891 sign = nibble & 8;
00892 delta = nibble & 7;
00893
00894
00895
00896 diff = ((2 * delta + 1) * c->step) >> 3;
00897
00898 c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
00899 c->predictor = av_clip_int16(c->predictor);
00900
00901 new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
00902 c->step = av_clip(new_step, 511, 32767);
00903
00904 return (short)c->predictor;
00905 }
00906
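/* Expand one Sound Blaster Pro code of 'size' bits (2, 3 or 4); 'shift'
 * adds extra gain for the 2-bit variant. */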
00907 static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
00908 {
00909 int sign, delta, diff;
00910
00911 sign = nibble & (1<<(size-1));
00912 delta = nibble & ((1<<(size-1))-1);
00913 diff = delta << (7 + c->step + shift);
00914
00915
00916 c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
00917
00918
00919 if (delta >= (2*size - 3) && c->step < 3)
00920 c->step++;
00921 else if (delta == 0 && c->step > 0)
00922 c->step--;
00923
00924 return (short) c->predictor;
00925 }
00926
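/* Expand one Yamaha ADPCM nibble and adapt the step size. */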
00927 static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
00928 {
00929 if(!c->step) {
00930 c->predictor = 0;
00931 c->step = 127;
00932 }
00933
00934 c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
00935 c->predictor = av_clip_int16(c->predictor);
00936 c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
00937 c->step = av_clip(c->step, 127, 24567);
00938 return c->predictor;
00939 }
00940
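/* Decode one 128-byte CD-ROM XA sound group (4 byte pairs of 28 samples each)
 * into interleaved output; 'inc' is 1 for mono and 2 for stereo. */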
00941 static void xa_decode(short *out, const unsigned char *in,
00942 ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
00943 {
00944 int i, j;
00945 int shift,filter,f0,f1;
00946 int s_1,s_2;
00947 int d,s,t;
00948
00949 for(i=0;i<4;i++) {
00950
00951 shift = 12 - (in[4+i*2] & 15);
00952 filter = in[4+i*2] >> 4;
00953 f0 = xa_adpcm_table[filter][0];
00954 f1 = xa_adpcm_table[filter][1];
00955
00956 s_1 = left->sample1;
00957 s_2 = left->sample2;
00958
00959 for(j=0;j<28;j++) {
00960 d = in[16+i+j*4];
00961
00962 t = (signed char)(d<<4)>>4;
00963 s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
00964 s_2 = s_1;
00965 s_1 = av_clip_int16(s);
00966 *out = s_1;
00967 out += inc;
00968 }
00969
00970 if (inc==2) {
00971 left->sample1 = s_1;
00972 left->sample2 = s_2;
00973 s_1 = right->sample1;
00974 s_2 = right->sample2;
00975 out = out + 1 - 28*2;
00976 }
00977
00978 shift = 12 - (in[5+i*2] & 15);
00979 filter = in[5+i*2] >> 4;
00980
00981 f0 = xa_adpcm_table[filter][0];
00982 f1 = xa_adpcm_table[filter][1];
00983
00984 for(j=0;j<28;j++) {
00985 d = in[16+i+j*4];
00986
00987 t = (signed char)d >> 4;
00988 s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
00989 s_2 = s_1;
00990 s_1 = av_clip_int16(s);
00991 *out = s_1;
00992 out += inc;
00993 }
00994
00995 if (inc==2) {
00996 right->sample1 = s_1;
00997 right->sample2 = s_2;
00998 out -= 1;
00999 } else {
01000 left->sample1 = s_1;
01001 left->sample2 = s_2;
01002 }
01003 }
01004 }
01005

/* Fetch the next DK3 4-bit code: low nibble of each byte first, then the high nibble */
01008 #define DK3_GET_NEXT_NIBBLE() \
01009 if (decode_top_nibble_next) \
01010 { \
01011 nibble = last_byte >> 4; \
01012 decode_top_nibble_next = 0; \
01013 } \
01014 else \
01015 { \
01016 last_byte = *src++; \
01017 if (src >= buf + buf_size) break; \
01018 nibble = last_byte & 0x0F; \
01019 decode_top_nibble_next = 1; \
01020 }
01021
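/* Decode one packet of ADPCM data into interleaved 16-bit samples;
 * returns the number of bytes consumed and sets *data_size. */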
01022 static int adpcm_decode_frame(AVCodecContext *avctx,
01023 void *data, int *data_size,
01024 AVPacket *avpkt)
01025 {
01026 const uint8_t *buf = avpkt->data;
01027 int buf_size = avpkt->size;
01028 ADPCMContext *c = avctx->priv_data;
01029 ADPCMChannelStatus *cs;
01030 int n, m, channel, i;
01031 int block_predictor[2];
01032 short *samples;
01033 short *samples_end;
01034 const uint8_t *src;
int st; /* stereo */

/* DK3 ADPCM accumulator variables */
01038 unsigned char last_byte = 0;
01039 unsigned char nibble;
01040 int decode_top_nibble_next = 0;
01041 int diff_channel;

/* EA ADPCM state variables */
01044 uint32_t samples_in_chunk;
01045 int32_t previous_left_sample, previous_right_sample;
01046 int32_t current_left_sample, current_right_sample;
01047 int32_t next_left_sample, next_right_sample;
01048 int32_t coeff1l, coeff2l, coeff1r, coeff2r;
01049 uint8_t shift_left, shift_right;
01050 int count1, count2;
01051 int coeff[2][2], shift[2];
01052
01053 if (!buf_size)
01054 return 0;
01055
/* This check should protect all the 4-bit ADPCM variants;
 * the extra 8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels. */
01059 if(*data_size/4 < buf_size + 8)
01060 return -1;
01061
01062 samples = data;
01063 samples_end= samples + *data_size/2;
01064 *data_size= 0;
01065 src = buf;
01066
01067 st = avctx->channels == 2 ? 1 : 0;
01068
01069 switch(avctx->codec->id) {
01070 case CODEC_ID_ADPCM_IMA_QT:
01071 n = buf_size - 2*avctx->channels;
01072 for (channel = 0; channel < avctx->channels; channel++) {
01073 int16_t predictor;
01074 int step_index;
01075 cs = &(c->status[channel]);
01076
/* Each channel chunk starts with a 2-byte big-endian header:
 * the top 9 bits hold the initial predictor, the low 7 bits the step index. */
01079 predictor = AV_RB16(src);
01080 step_index = predictor & 0x7F;
01081 predictor &= 0xFF80;
01082
01083 src += 2;
01084
01085 if (cs->step_index == step_index) {
01086 int diff = (int)predictor - cs->predictor;
01087 if (diff < 0)
01088 diff = - diff;
01089 if (diff > 0x7f)
01090 goto update;
01091 } else {
01092 update:
01093 cs->step_index = step_index;
01094 cs->predictor = predictor;
01095 }
01096
01097 if (cs->step_index > 88){
01098 av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
01099 cs->step_index = 88;
01100 }
01101
01102 samples = (short*)data + channel;
01103
01104 for(m=32; n>0 && m>0; n--, m--) {
01105 *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
01106 samples += avctx->channels;
01107 *samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3);
01108 samples += avctx->channels;
01109 src ++;
01110 }
01111 }
01112 if (st)
01113 samples--;
01114 break;
01115 case CODEC_ID_ADPCM_IMA_WAV:
01116 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01117 buf_size = avctx->block_align;
01118
01119
01120
01121 for(i=0; i<avctx->channels; i++){
01122 cs = &(c->status[i]);
01123 cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);
01124
01125 cs->step_index = *src++;
01126 if (cs->step_index > 88){
01127 av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
01128 cs->step_index = 88;
01129 }
01130 if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]);
01131 }
01132
01133 while(src < buf + buf_size){
01134 for(m=0; m<4; m++){
01135 for(i=0; i<=st; i++)
01136 *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
01137 for(i=0; i<=st; i++)
01138 *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
01139 src++;
01140 }
01141 src += 4*st;
01142 }
01143 break;
01144 case CODEC_ID_ADPCM_4XM:
01145 cs = &(c->status[0]);
01146 c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
01147 if(st){
01148 c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
01149 }
01150 c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
01151 if(st){
01152 c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
01153 }
01154 if (cs->step_index < 0) cs->step_index = 0;
01155 if (cs->step_index > 88) cs->step_index = 88;
01156
01157 m= (buf_size - (src - buf))>>st;
01158 for(i=0; i<m; i++) {
01159 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
01160 if (st)
01161 *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
01162 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
01163 if (st)
01164 *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
01165 }
01166
01167 src += m<<st;
01168
01169 break;
01170 case CODEC_ID_ADPCM_MS:
01171 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01172 buf_size = avctx->block_align;
01173 n = buf_size - 7 * avctx->channels;
01174 if (n < 0)
01175 return -1;
01176 block_predictor[0] = av_clip(*src++, 0, 6);
01177 block_predictor[1] = 0;
01178 if (st)
01179 block_predictor[1] = av_clip(*src++, 0, 6);
01180 c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
01181 if (st){
01182 c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
01183 }
01184 c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
01185 c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
01186 c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
01187 c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];
01188
01189 c->status[0].sample1 = bytestream_get_le16(&src);
01190 if (st) c->status[1].sample1 = bytestream_get_le16(&src);
01191 c->status[0].sample2 = bytestream_get_le16(&src);
01192 if (st) c->status[1].sample2 = bytestream_get_le16(&src);
01193
01194 *samples++ = c->status[0].sample2;
01195 if (st) *samples++ = c->status[1].sample2;
01196 *samples++ = c->status[0].sample1;
01197 if (st) *samples++ = c->status[1].sample1;
01198 for(;n>0;n--) {
01199 *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
01200 *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
01201 src ++;
01202 }
01203 break;
01204 case CODEC_ID_ADPCM_IMA_DK4:
01205 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01206 buf_size = avctx->block_align;
01207
01208 c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
01209 c->status[0].step_index = *src++;
01210 src++;
01211 *samples++ = c->status[0].predictor;
01212 if (st) {
01213 c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
01214 c->status[1].step_index = *src++;
01215 src++;
01216 *samples++ = c->status[1].predictor;
01217 }
01218 while (src < buf + buf_size) {

/* take care of the top nibble (always left or mono channel) */
01221 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01222 src[0] >> 4, 3);

/* take care of the bottom nibble, which is the right sample for
 * stereo, or another mono sample */
01226 if (st)
01227 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01228 src[0] & 0x0F, 3);
01229 else
01230 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01231 src[0] & 0x0F, 3);
01232
01233 src++;
01234 }
01235 break;
01236 case CODEC_ID_ADPCM_IMA_DK3:
01237 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01238 buf_size = avctx->block_align;
01239
01240 if(buf_size + 16 > (samples_end - samples)*3/8)
01241 return -1;
01242
01243 c->status[0].predictor = (int16_t)AV_RL16(src + 10);
01244 c->status[1].predictor = (int16_t)AV_RL16(src + 12);
01245 c->status[0].step_index = src[14];
01246 c->status[1].step_index = src[15];
01247
01248 src += 16;
01249 diff_channel = c->status[1].predictor;
01250
/* DK3 interleaves a "sum" stream (status[0]) and a "diff" stream (status[1]);
 * every three nibbles produce two stereo pairs: (sum+diff, sum-diff). */
01253 while (1) {
/* sum channel nibble */
01259 DK3_GET_NEXT_NIBBLE();
01260 adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

/* diff channel nibble */
01263 DK3_GET_NEXT_NIBBLE();
01264 adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

/* output the first pair of stereo samples */
01267 diff_channel = (diff_channel + c->status[1].predictor) / 2;
01268 *samples++ = c->status[0].predictor + c->status[1].predictor;
01269 *samples++ = c->status[0].predictor - c->status[1].predictor;

/* second sum channel nibble */
01272 DK3_GET_NEXT_NIBBLE();
01273 adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

/* output the second pair of stereo samples */
01276 diff_channel = (diff_channel + c->status[1].predictor) / 2;
01277 *samples++ = c->status[0].predictor + c->status[1].predictor;
01278 *samples++ = c->status[0].predictor - c->status[1].predictor;
01279 }
01280 break;
01281 case CODEC_ID_ADPCM_IMA_ISS:
01282 c->status[0].predictor = (int16_t)AV_RL16(src + 0);
01283 c->status[0].step_index = src[2];
01284 src += 4;
01285 if(st) {
01286 c->status[1].predictor = (int16_t)AV_RL16(src + 0);
01287 c->status[1].step_index = src[2];
01288 src += 4;
01289 }
01290
01291 while (src < buf + buf_size) {
01292
01293 if (st) {
01294 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01295 src[0] >> 4 , 3);
01296 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01297 src[0] & 0x0F, 3);
01298 } else {
01299 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01300 src[0] & 0x0F, 3);
01301 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01302 src[0] >> 4 , 3);
01303 }
01304
01305 src++;
01306 }
01307 break;
01308 case CODEC_ID_ADPCM_IMA_WS:
01309
01310 while (src < buf + buf_size) {
01311
01312 if (st) {
01313 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01314 src[0] >> 4 , 3);
01315 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01316 src[0] & 0x0F, 3);
01317 } else {
01318 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01319 src[0] >> 4 , 3);
01320 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01321 src[0] & 0x0F, 3);
01322 }
01323
01324 src++;
01325 }
01326 break;
01327 case CODEC_ID_ADPCM_XA:
01328 while (buf_size >= 128) {
01329 xa_decode(samples, src, &c->status[0], &c->status[1],
01330 avctx->channels);
01331 src += 128;
01332 samples += 28 * 8;
01333 buf_size -= 128;
01334 }
01335 break;
01336 case CODEC_ID_ADPCM_IMA_EA_EACS: {
01337 unsigned header_size = 4 + (8<<st);
01338 samples_in_chunk = bytestream_get_le32(&src) >> (1-st);
01339
01340 if (buf_size < header_size || samples_in_chunk > buf_size - header_size) {
01341 src += buf_size - 4;
01342 break;
01343 }
01344
01345 for (i=0; i<=st; i++)
01346 c->status[i].step_index = bytestream_get_le32(&src);
01347 for (i=0; i<=st; i++)
01348 c->status[i].predictor = bytestream_get_le32(&src);
01349
01350 for (; samples_in_chunk; samples_in_chunk--, src++) {
01351 *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
01352 *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
01353 }
01354 break;
01355 }
01356 case CODEC_ID_ADPCM_IMA_EA_SEAD:
01357 for (; src < buf+buf_size; src++) {
01358 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
01359 *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
01360 }
01361 break;
01362 case CODEC_ID_ADPCM_EA:
/* A frame: a 12-byte header (sample count plus two initial samples per
 * channel), then 30-byte blocks each coding 28 stereo samples. */
01365 if (buf_size < 12) {
01366 av_log(avctx, AV_LOG_ERROR, "frame too small\n");
01367 return AVERROR(EINVAL);
01368 }
01369 samples_in_chunk = AV_RL32(src);
01370 if (samples_in_chunk / 28 > (buf_size - 12) / 30) {
01371 av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
01372 return AVERROR(EINVAL);
01373 }
01374 src += 4;
01375 current_left_sample = (int16_t)bytestream_get_le16(&src);
01376 previous_left_sample = (int16_t)bytestream_get_le16(&src);
01377 current_right_sample = (int16_t)bytestream_get_le16(&src);
01378 previous_right_sample = (int16_t)bytestream_get_le16(&src);
01379
01380 for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
01381 coeff1l = ea_adpcm_table[ *src >> 4 ];
01382 coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
01383 coeff1r = ea_adpcm_table[*src & 0x0F];
01384 coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
01385 src++;
01386
01387 shift_left = (*src >> 4 ) + 8;
01388 shift_right = (*src & 0x0F) + 8;
01389 src++;
01390
01391 for (count2 = 0; count2 < 28; count2++) {
01392 next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left;
01393 next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
01394 src++;
01395
01396 next_left_sample = (next_left_sample +
01397 (current_left_sample * coeff1l) +
01398 (previous_left_sample * coeff2l) + 0x80) >> 8;
01399 next_right_sample = (next_right_sample +
01400 (current_right_sample * coeff1r) +
01401 (previous_right_sample * coeff2r) + 0x80) >> 8;
01402
01403 previous_left_sample = current_left_sample;
01404 current_left_sample = av_clip_int16(next_left_sample);
01405 previous_right_sample = current_right_sample;
01406 current_right_sample = av_clip_int16(next_right_sample);
01407 *samples++ = (unsigned short)current_left_sample;
01408 *samples++ = (unsigned short)current_right_sample;
01409 }
01410 }
01411
01412 if (src - buf == buf_size - 2)
01413 src += 2;
01414
01415 break;
01416 case CODEC_ID_ADPCM_EA_MAXIS_XA:
01417 for(channel = 0; channel < avctx->channels; channel++) {
01418 for (i=0; i<2; i++)
01419 coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
01420 shift[channel] = (*src & 0x0F) + 8;
01421 src++;
01422 }
01423 for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
01424 for(i = 4; i >= 0; i-=4) {
01425 for(channel = 0; channel < avctx->channels; channel++) {
01426 int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
01427 sample = (sample +
01428 c->status[channel].sample1 * coeff[channel][0] +
01429 c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
01430 c->status[channel].sample2 = c->status[channel].sample1;
01431 c->status[channel].sample1 = av_clip_int16(sample);
01432 *samples++ = c->status[channel].sample1;
01433 }
01434 }
01435 src+=avctx->channels;
01436 }
01437 break;
01438 case CODEC_ID_ADPCM_EA_R1:
01439 case CODEC_ID_ADPCM_EA_R2:
01440 case CODEC_ID_ADPCM_EA_R3: {
/* channel numbering
 * 2chan: 0=fl, 1=fr
 * 4chan: 0=fl, 1=rl, 2=fr, 3=rr
 * 6chan: 0=fl, 1=c,  2=fr, 3=rl, 4=rr, 5=sub */
01445 const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
01446 int32_t previous_sample, current_sample, next_sample;
01447 int32_t coeff1, coeff2;
01448 uint8_t shift;
01449 unsigned int channel;
01450 uint16_t *samplesC;
01451 const uint8_t *srcC;
01452 const uint8_t *src_end = buf + buf_size;
01453
01454 samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
01455 : bytestream_get_le32(&src)) / 28;
01456 if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
01457 28*samples_in_chunk*avctx->channels > samples_end-samples) {
01458 src += buf_size - 4;
01459 break;
01460 }
01461
01462 for (channel=0; channel<avctx->channels; channel++) {
01463 int32_t offset = (big_endian ? bytestream_get_be32(&src)
01464 : bytestream_get_le32(&src))
01465 + (avctx->channels-channel-1) * 4;
01466
01467 if ((offset < 0) || (offset >= src_end - src - 4)) break;
01468 srcC = src + offset;
01469 samplesC = samples + channel;
01470
01471 if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
01472 current_sample = (int16_t)bytestream_get_le16(&srcC);
01473 previous_sample = (int16_t)bytestream_get_le16(&srcC);
01474 } else {
01475 current_sample = c->status[channel].predictor;
01476 previous_sample = c->status[channel].prev_sample;
01477 }
01478
01479 for (count1=0; count1<samples_in_chunk; count1++) {
01480 if (*srcC == 0xEE) {
01481 srcC++;
01482 if (srcC > src_end - 30*2) break;
01483 current_sample = (int16_t)bytestream_get_be16(&srcC);
01484 previous_sample = (int16_t)bytestream_get_be16(&srcC);
01485
01486 for (count2=0; count2<28; count2++) {
01487 *samplesC = (int16_t)bytestream_get_be16(&srcC);
01488 samplesC += avctx->channels;
01489 }
01490 } else {
01491 coeff1 = ea_adpcm_table[ *srcC>>4 ];
01492 coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
01493 shift = (*srcC++ & 0x0F) + 8;
01494
01495 if (srcC > src_end - 14) break;
01496 for (count2=0; count2<28; count2++) {
01497 if (count2 & 1)
01498 next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
01499 else
01500 next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift;
01501
01502 next_sample += (current_sample * coeff1) +
01503 (previous_sample * coeff2);
01504 next_sample = av_clip_int16(next_sample >> 8);
01505
01506 previous_sample = current_sample;
01507 current_sample = next_sample;
01508 *samplesC = current_sample;
01509 samplesC += avctx->channels;
01510 }
01511 }
01512 }
01513
01514 if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
01515 c->status[channel].predictor = current_sample;
01516 c->status[channel].prev_sample = previous_sample;
01517 }
01518 }
01519
01520 src = src + buf_size - (4 + 4*avctx->channels);
01521 samples += 28 * samples_in_chunk * avctx->channels;
01522 break;
01523 }
01524 case CODEC_ID_ADPCM_EA_XAS:
01525 if (samples_end-samples < 32*4*avctx->channels
01526 || buf_size < (4+15)*4*avctx->channels) {
01527 src += buf_size;
01528 break;
01529 }
01530 for (channel=0; channel<avctx->channels; channel++) {
01531 int coeff[2][4], shift[4];
01532 short *s2, *s = &samples[channel];
01533 for (n=0; n<4; n++, s+=32*avctx->channels) {
01534 for (i=0; i<2; i++)
01535 coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
01536 shift[n] = (src[2]&0x0F) + 8;
01537 for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
01538 s2[0] = (src[0]&0xF0) + (src[1]<<8);
01539 }
01540
01541 for (m=2; m<32; m+=2) {
01542 s = &samples[m*avctx->channels + channel];
01543 for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
01544 for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
01545 int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
01546 int pred = s2[-1*avctx->channels] * coeff[0][n]
01547 + s2[-2*avctx->channels] * coeff[1][n];
01548 s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
01549 }
01550 }
01551 }
01552 }
01553 samples += 32*4*avctx->channels;
01554 break;
01555 case CODEC_ID_ADPCM_IMA_AMV:
01556 case CODEC_ID_ADPCM_IMA_SMJPEG:
01557 c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
01558 c->status[0].step_index = bytestream_get_le16(&src);
01559
01560 if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
01561 src+=4;
01562
01563 while (src < buf + buf_size) {
01564 char hi, lo;
01565 lo = *src & 0x0F;
01566 hi = *src >> 4;
01567
01568 if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
01569 FFSWAP(char, hi, lo);
01570
01571 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01572 lo, 3);
01573 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01574 hi, 3);
01575 src++;
01576 }
01577 break;
01578 case CODEC_ID_ADPCM_CT:
01579 while (src < buf + buf_size) {
01580 if (st) {
01581 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01582 src[0] >> 4);
01583 *samples++ = adpcm_ct_expand_nibble(&c->status[1],
01584 src[0] & 0x0F);
01585 } else {
01586 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01587 src[0] >> 4);
01588 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01589 src[0] & 0x0F);
01590 }
01591 src++;
01592 }
01593 break;
01594 case CODEC_ID_ADPCM_SBPRO_4:
01595 case CODEC_ID_ADPCM_SBPRO_3:
01596 case CODEC_ID_ADPCM_SBPRO_2:
01597 if (!c->status[0].step_index) {
01598
01599 *samples++ = 128 * (*src++ - 0x80);
01600 if (st)
01601 *samples++ = 128 * (*src++ - 0x80);
01602 c->status[0].step_index = 1;
01603 }
01604 if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
01605 while (src < buf + buf_size) {
01606 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01607 src[0] >> 4, 4, 0);
01608 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01609 src[0] & 0x0F, 4, 0);
01610 src++;
01611 }
01612 } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
01613 while (src < buf + buf_size && samples + 2 < samples_end) {
01614 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01615 src[0] >> 5 , 3, 0);
01616 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01617 (src[0] >> 2) & 0x07, 3, 0);
01618 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01619 src[0] & 0x03, 2, 0);
01620 src++;
01621 }
01622 } else {
01623 while (src < buf + buf_size && samples + 3 < samples_end) {
01624 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01625 src[0] >> 6 , 2, 2);
01626 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01627 (src[0] >> 4) & 0x03, 2, 2);
01628 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01629 (src[0] >> 2) & 0x03, 2, 2);
01630 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01631 src[0] & 0x03, 2, 2);
01632 src++;
01633 }
01634 }
01635 break;
01636 case CODEC_ID_ADPCM_SWF:
01637 {
01638 GetBitContext gb;
01639 const int *table;
01640 int k0, signmask, nb_bits, count;
01641 int size = buf_size*8;
01642
01643 init_get_bits(&gb, buf, size);

// read bits & initial values
01646 nb_bits = get_bits(&gb, 2)+2;
01647
01648 table = swf_index_tables[nb_bits-2];
01649 k0 = 1 << (nb_bits-2);
01650 signmask = 1 << (nb_bits-1);
01651
01652 while (get_bits_count(&gb) <= size - 22*avctx->channels) {
01653 for (i = 0; i < avctx->channels; i++) {
01654 *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
01655 c->status[i].step_index = get_bits(&gb, 6);
01656 }
01657
01658 for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
01659 int i;
01660
01661 for (i = 0; i < avctx->channels; i++) {
// similar to IMA adpcm
01663 int delta = get_bits(&gb, nb_bits);
01664 int step = step_table[c->status[i].step_index];
01665 long vpdiff = 0;
01666 int k = k0;
01667
01668 do {
01669 if (delta & k)
01670 vpdiff += step;
01671 step >>= 1;
01672 k >>= 1;
01673 } while(k);
01674 vpdiff += step;
01675
01676 if (delta & signmask)
01677 c->status[i].predictor -= vpdiff;
01678 else
01679 c->status[i].predictor += vpdiff;
01680
01681 c->status[i].step_index += table[delta & (~signmask)];
01682
01683 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
01684 c->status[i].predictor = av_clip_int16(c->status[i].predictor);
01685
01686 *samples++ = c->status[i].predictor;
01687 if (samples >= samples_end) {
01688 av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
01689 return -1;
01690 }
01691 }
01692 }
01693 }
01694 src += buf_size;
01695 break;
01696 }
01697 case CODEC_ID_ADPCM_YAMAHA:
01698 while (src < buf + buf_size) {
01699 if (st) {
01700 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01701 src[0] & 0x0F);
01702 *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
01703 src[0] >> 4 );
01704 } else {
01705 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01706 src[0] & 0x0F);
01707 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01708 src[0] >> 4 );
01709 }
01710 src++;
01711 }
01712 break;
01713 case CODEC_ID_ADPCM_THP:
01714 {
01715 int table[2][16];
01716 unsigned int samplecnt;
01717 int prev[2][2];
01718 int ch;
01719
01720 if (buf_size < 80) {
01721 av_log(avctx, AV_LOG_ERROR, "frame too small\n");
01722 return -1;
01723 }
01724
01725 src+=4;
01726 samplecnt = bytestream_get_be32(&src);
01727
01728 for (i = 0; i < 32; i++)
01729 table[0][i] = (int16_t)bytestream_get_be16(&src);

/* Initialize the previous sample (both channels are filled through the flat
 * layout of prev[2][2], as with table[2][16] above). */
01732 for (i = 0; i < 4; i++)
01733 prev[0][i] = (int16_t)bytestream_get_be16(&src);
01734
01735 if (samplecnt >= (samples_end - samples) / (st + 1)) {
01736 av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
01737 return -1;
01738 }
01739
01740 for (ch = 0; ch <= st; ch++) {
01741 samples = (unsigned short *) data + ch;

/* Read in every sample for this channel. */
01744 for (i = 0; i < samplecnt / 14; i++) {
01745 int index = (*src >> 4) & 7;
01746 unsigned int exp = 28 - (*src++ & 15);
01747 int factor1 = table[ch][index * 2];
01748 int factor2 = table[ch][index * 2 + 1];

/* Decode 14 samples. */
01751 for (n = 0; n < 14; n++) {
01752 int32_t sampledat;
01753 if(n&1) sampledat= *src++ <<28;
01754 else sampledat= (*src&0xF0)<<24;
01755
01756 sampledat = ((prev[ch][0]*factor1
01757 + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
01758 *samples = av_clip_int16(sampledat);
01759 prev[ch][1] = prev[ch][0];
01760 prev[ch][0] = *samples++;
01761
01762
01763
01764 samples += st;
01765 }
01766 }
01767 }
01768
01769
01770
01771 samples -= st;
01772 break;
01773 }
01774
01775 default:
01776 return -1;
01777 }
01778 *data_size = (uint8_t *)samples - (uint8_t *)data;
01779 return src - buf;
01780 }
01781
01782
01783
01784 #if CONFIG_ENCODERS
01785 #define ADPCM_ENCODER(id,name,long_name_) \
01786 AVCodec ff_ ## name ## _encoder = { \
01787 #name, \
01788 AVMEDIA_TYPE_AUDIO, \
01789 id, \
01790 sizeof(ADPCMContext), \
01791 adpcm_encode_init, \
01792 adpcm_encode_frame, \
01793 adpcm_encode_close, \
01794 NULL, \
01795 .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \
01796 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
01797 }
01798 #else
01799 #define ADPCM_ENCODER(id,name,long_name_)
01800 #endif
01801
01802 #if CONFIG_DECODERS
01803 #define ADPCM_DECODER(id,name,long_name_) \
01804 AVCodec ff_ ## name ## _decoder = { \
01805 #name, \
01806 AVMEDIA_TYPE_AUDIO, \
01807 id, \
01808 sizeof(ADPCMContext), \
01809 adpcm_decode_init, \
01810 NULL, \
01811 NULL, \
01812 adpcm_decode_frame, \
01813 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
01814 }
01815 #else
01816 #define ADPCM_DECODER(id,name,long_name_)
01817 #endif
01818
01819 #define ADPCM_CODEC(id,name,long_name_) \
01820 ADPCM_ENCODER(id,name,long_name_); ADPCM_DECODER(id,name,long_name_)
01821
01822
01823 ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
01824 ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
01825 ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
01826 ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
01827 ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
01828 ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
01829 ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
01830 ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
01831 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
01832 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
01833 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
01834 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
01835 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
01836 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
01837 ADPCM_CODEC (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
01838 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
01839 ADPCM_CODEC (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
01840 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
01841 ADPCM_CODEC (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
01842 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
01843 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
01844 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
01845 ADPCM_CODEC (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
01846 ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
01847 ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
01848 ADPCM_CODEC (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");