Converting ulaw to alaw

    charles henington
    #1

I've been trying to convert ulaw to alaw following a few examples that I found online. I'm able to convert alaw to ulaw quite well, but when converting back to alaw the output becomes distorted. If anyone knows a better solution, please share. Here is the code that is producing the distorted alaw.

public static NativePointer Encode(G711.Ulaw ulaw)
{
    if (ulaw == null || !ulaw.ContainsAudio()) throw new Exception(nameof(ulaw));
    List<byte> bytes = [];
    NativePointer pointer = ulaw._nativePointer!;
    byte[] result = new byte[pointer.Size];
    Marshal.Copy(pointer, result, 0, pointer.Size);
    foreach (byte byt in result)
    {
        bytes.Add(LinearToALawSample(MuLawToLinearSample(byt)));
    }

    return new(bytes);

    static byte LinearToALawSample(short pcm_val)
    {
        int mask;
        int seg;
        byte aval;

        if (pcm_val >= 0)
        {
            mask = 0xD5; // sign (7th) bit = 1
        }
        else
        {
            mask = 0x55; // sign bit = 0
            pcm_val = (short)-pcm_val;
            if (pcm_val > EncoderInfo.ALAW_MAX)
            {
                pcm_val = EncoderInfo.ALAW_MAX;
            }
        }

        if (pcm_val < 256)
        {
            aval = (byte)(pcm_val >> 4);
        }
        else
        {
            seg = 1;
            for (int i = pcm_val; i > 256; i >>= 1)
            {
                seg++;
            }
            aval = (byte)((seg << EncoderInfo.SEG_SHIFT) | ((pcm_val >> (seg + 3)) & EncoderInfo.QUANT_MASK));
        }

        return (byte)(((aval & EncoderInfo.SEG_MASK) ^ mask) & EncoderInfo.SIGN_BIT);
    }

    static short MuLawToLinearSample(byte muLaw)
    {
        int sign = (muLaw & EncoderInfo.SIGN_BIT) >> 7;
        int exponent = (muLaw & EncoderInfo.SEG_MASK) >> 4;
        int mantissa = muLaw & 0x0F;
        int sample = ((mantissa << 3) + EncoderInfo.BIAS) << (exponent + 2);
        return (short)(sign == 0 ? sample : -sample);
    }
}

    Here is the EncoderInfo class.

internal static class EncoderInfo
{
    public const int BIAS = 0x84;
    public const int SEG_MASK = 0x70;
    public const int SIGN_BIT = 0x80;
    public const int ALAW_MAX = 0xFFF;
    public const int QUANT_MASK = 0xF;
    public const int SEG_SHIFT = 4;
}

      trønderen
      #2

Disclaimer: I never tried to do any such conversion myself. But I read in the G.711 standard:

"If a µ-A conversion is followed by an A-µ conversion, most of the octets are restored to their original values. Only those octets which correspond to µ-law decoder output value numbers 0, 2, 4, 6, 8, 10, 12, 14 are changed (the numbers being increased by 1). Moreover, in these octets, only bit No. 8 (least significant bit in PCM) is changed. Accordingly, the double conversion µ-A-µ is transparent to bits Nos. 1-7. Similarly, if an A-µ conversion is followed by a µ-A conversion, only the octets corresponding to A-law decoder output value numbers 26, 28, 30, 32, 45, 47, 63 and 80 are changed. Again, only bit No. 8 is changed, i.e. the double conversion A-µ-A, too, is transparent to bits Nos. 1-7. A consequence of this property is that in most of the analogue voice frequency signal range the additional quantizing distortion caused by µ-A-µ or A-µ-A conversion is considerably lower than that caused by either µ-A or A-µ conversion (see Recommendation G.113)."

It sounds to me like you cannot do a bit-transparent µ-A-µ or A-µ-A conversion cycle; extra distortion will be introduced. Maybe this is the distortion you are referring to. In case you do not have the G.711 standard document (from which the above quote is taken), you can retrieve it from www.itu.int. Note the red text: "Corresponding ANSI-C code is available in the G.711 module of the ITU-T G.191 Software Tools Library." This is a huge library; the source text fills almost 70 MB. You might find useful code there.
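A quick way to see how much of this matters in practice is to round-trip every µ-law code through a known-good codec. The sketch below is not from the thread; it assumes the reference converters in NAudio's NAudio.Codecs namespace (MuLawDecoder, ALawEncoder, ALawDecoder, MuLawEncoder), but any trusted G.711 implementation would do:

using System;
using NAudio.Codecs; // reference µ-law/A-law converters (an assumption, not from this thread)

internal static class RoundTripCheck
{
    // Push every µ-law code through µ-law -> linear -> A-law -> linear -> µ-law and
    // report the codes that do not survive. The G.711 text quoted above suggests only
    // a handful of codes change, and only in the least significant PCM bit.
    internal static void Main()
    {
        for (int code = 0; code <= 0xFF; code++)
        {
            byte mu = (byte)code;
            short linear1 = MuLawDecoder.MuLawToLinearSample(mu);
            byte alaw = ALawEncoder.LinearToALawSample(linear1);
            short linear2 = ALawDecoder.ALawToLinearSample(alaw);
            byte muBack = MuLawEncoder.LinearToMuLawSample(linear2);

            if (muBack != mu)
            {
                Console.WriteLine($"0x{mu:X2} -> A-law 0x{alaw:X2} -> 0x{muBack:X2} (bits changed: 0x{(mu ^ muBack):X2})");
            }
        }
    }
}

If an encoder produces far more mismatches than this baseline, the distortion is coming from the code rather than from the unavoidable µ-A-µ quantizing loss.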

      Religious freedom is the freedom to say that two plus two make five.

        charles henington
        #3

Thanks, I will look deeper into this and see where it leads. The distortion I speak of could very well be explained by this. I will need to look at a ulaw file that has not been encoded by my alaw/ulaw encoders first, to determine whether it's due to the spec or something within my encoders.

          Peter_in_2780
          #4

Further to the above excellent suggestions, why not create a 0..FF ramp and pass it through your process? Spotting the distortion would be very easy then. Anything involving bit-shifting like this makes me very wary about sign extension. I've been bitten by assuming something was unsigned and then wondering where all the high-order 1s came from when I used >>.
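A minimal sketch of that ramp test, assuming the poster's LinearToALawSample and MuLawToLinearSample are lifted out of Encode (or otherwise made callable) so they can be exercised directly:

// Feed every possible µ-law byte through the two conversion steps and print the
// mapping; a collapsed range or a sign-extension surprise shows up immediately.
for (int b = 0; b <= 0xFF; b++)
{
    short linear = MuLawToLinearSample((byte)b);
    byte alaw = LinearToALawSample(linear);
    Console.WriteLine($"ulaw 0x{b:X2} -> linear {linear,6} -> alaw 0x{alaw:X2}");
}

Run against the code posted above, a ramp like this also makes the effect of the trailing & EncoderInfo.SIGN_BIT in LinearToALawSample visible, since every A-law output byte collapses to 0x00 or 0x80 instead of using the full code space.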

          Software rusts. Simon Stephenson, ca 1994. So does this signature. me, 2012

            jeron1
            #5

Maybe a simple translation table like the one in this link? g711/ulaw.go at master · zaf/g711 · GitHub
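A small sketch of that idea in C# (not the literal table from the linked Go file): build the 256-entry µ-law to A-law table once from a pair of reference converters, here assumed to be NAudio's NAudio.Codecs classes, and transcoding a buffer becomes a plain per-byte lookup.

using NAudio.Codecs; // used only to build the table; any verified converter pair works

internal static class UlawToAlawTable
{
    // 256-entry µ-law -> A-law translation table, computed once.
    private static readonly byte[] Table = BuildTable();

    private static byte[] BuildTable()
    {
        var table = new byte[256];
        for (int b = 0; b <= 0xFF; b++)
        {
            short linear = MuLawDecoder.MuLawToLinearSample((byte)b);
            table[b] = ALawEncoder.LinearToALawSample(linear);
        }
        return table;
    }

    // Transcode a µ-law buffer to A-law one byte at a time via the table.
    internal static byte[] Transcode(byte[] ulaw)
    {
        var alaw = new byte[ulaw.Length];
        for (int i = 0; i < ulaw.Length; i++)
        {
            alaw[i] = Table[ulaw[i]];
        }
        return alaw;
    }
}

Once the table has been checked against a known-good one (for example the literal table in the linked Go file), all the per-sample bit twiddling drops out of the hot path.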

            "the debugger doesn't tell me anything because this code compiles just fine" - random QA comment "Facebook is where you tell lies to your friends. Twitter is where you tell the truth to strangers." - chriselst "I don't drink any more... then again, I don't drink any less." - Mike Mullikins uncle

              charles henington
              #6

Thanks for the link. The error was in fact in the ulaw-to-alaw encoding. The output audio now sounds identical, although the samples are different. I will have to look into this; I assume it could be due to endianness.
