This commit is contained in:
Cryptiiiic 2021-03-01 14:06:50 -08:00
parent e6c21f160e
commit 1e06b98d58
No known key found for this signature in database
GPG key ID: 4CCAE32CC026C76D
24647 changed files with 12 additions and 4826333 deletions

View file

@@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CanonicalName</key>
<string>macosx10.13</string>
<key>CustomProperties</key>
<dict>
<key>KERNEL_EXTENSION_HEADER_SEARCH_PATHS</key>
<string>$(KERNEL_FRAMEWORK)/PrivateHeaders $(KERNEL_FRAMEWORK_HEADERS)</string>
</dict>
<key>DefaultProperties</key>
<dict>
<key>MACOSX_DEPLOYMENT_TARGET</key>
<string>10.13</string>
<key>PLATFORM_NAME</key>
<string>macosx</string>
<key>DEFAULT_KEXT_INSTALL_PATH</key>
<string>$(LIBRARY_KEXT_INSTALL_PATH)</string>
<key>KASAN_DEFAULT_CFLAGS</key>
<string>-DKASAN=1 -fsanitize=address -mllvm -asan-globals-live-support -mllvm -asan-force-dynamic-shadow</string>
</dict>
<key>DisplayName</key>
<string>macOS 10.13</string>
<key>MaximumDeploymentTarget</key>
<string>10.13</string>
<key>MinimalDisplayName</key>
<string>10.13</string>
<key>MinimumSupportedToolsVersion</key>
<string>3.2</string>
<key>SupportedBuildToolComponents</key>
<array>
<string>com.apple.compilers.gcc.headers.4_2</string>
</array>
<key>Version</key>
<string>10.13</string>
<key>isBaseSDK</key>
<string>YES</string>
</dict>
</plist>
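The SDKSettings.plist above is what the build system reads to identify this SDK (canonical name, default deployment target, and so on). As a hedged illustration only, the same keys can be read with Foundation; the SDK path below is a hypothetical install location.

#import <Foundation/Foundation.h>

int main(void) {
    @autoreleasepool {
        // Hypothetical path; adjust to wherever the SDK is installed.
        NSString *path = @"/Library/Developer/SDKs/MacOSX10.13.sdk/SDKSettings.plist";
        NSDictionary *settings = [NSDictionary dictionaryWithContentsOfFile:path];
        NSLog(@"CanonicalName: %@", settings[@"CanonicalName"]);            // macosx10.13
        NSLog(@"Deployment target: %@",
              settings[@"DefaultProperties"][@"MACOSX_DEPLOYMENT_TARGET"]); // 10.13
    }
    return 0;
}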

View file

@@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ProductBuildVersion</key>
<string>17A315g</string>
<key>ProductCopyright</key>
<string>1983-2017 Apple Inc.</string>
<key>ProductName</key>
<string>Mac OS X</string>
<key>ProductUserVisibleVersion</key>
<string>10.13</string>
<key>ProductVersion</key>
<string>10.13</string>
</dict>
</plist>

View file

@@ -1 +0,0 @@
Versions/Current/AGL.tbd

View file

@@ -1 +0,0 @@
Versions/Current/Headers

View file

@@ -1,31 +0,0 @@
--- !tapi-tbd-v2
archs: [ i386, x86_64 ]
uuids: [ 'i386: 43F7250F-A1A8-3884-99FD-6095B123611D', 'x86_64: 30F343A0-A026-3BE2-B58E-E41CD7F1C789' ]
platform: macosx
install-name: /System/Library/Frameworks/AGL.framework/Versions/A/AGL
objc-constraint: none
exports:
- archs: [ i386, x86_64 ]
symbols: [ _aglChoosePixelFormat, _aglChoosePixelFormatCFM, _aglConfigure,
_aglConfigureCFM, _aglCopyContext, _aglCreateContext, _aglCreateContextCFM,
_aglCreatePBuffer, _aglCreatePixelFormat, _aglCreatePixelFormatCFM,
_aglDescribePBuffer, _aglDescribePixelFormat, _aglDescribeRenderer,
_aglDestroyContext, _aglDestroyPBuffer, _aglDestroyPixelFormat,
_aglDestroyRendererInfo, _aglDevicesOfPixelFormat, _aglDisable,
_aglDisplaysOfPixelFormat, _aglEnable, _aglErrorString, _aglGetCGLContext,
_aglGetCGLPixelFormat, _aglGetCurrentContext, _aglGetDrawable,
_aglGetError, _aglGetHIViewRef, _aglGetInteger, _aglGetPBuffer,
_aglGetVersion, _aglGetVirtualScreen, _aglGetWindowRef, _aglIsEnabled,
_aglNextPixelFormat, _aglNextRendererInfo, _aglQueryRendererInfo,
_aglQueryRendererInfoCFM, _aglQueryRendererInfoForCGDirectDisplayIDs,
_aglResetLibrary, _aglSetCurrentContext, _aglSetDrawable,
_aglSetFullScreen, _aglSetHIViewRef, _aglSetInteger, _aglSetOffScreen,
_aglSetPBuffer, _aglSetVirtualScreen, _aglSetWindowRef, _aglSurfaceTexture,
_aglSwapBuffers, _aglTexImagePBuffer, _aglUpdateContext, _aglUseFont,
_glmCalloc, _glmCopy, _glmDCBAlloc, _glmDCBFree, _glmDCBRealloc,
_glmFree, _glmGetError, _glmGetInteger, _glmMalloc, _glmPageFreeAll,
_glmRealloc, _glmReportMemoryStatus, _glmResetMemoryStatus,
_glmSetDebugInfo, _glmSetDouble, _glmSetFunc, _glmSetInteger,
_glmSetMode, _glmSetUByte, _glmSetUInt, _glmSetUShort, _glmVecAlloc,
_glmVecFree, _glmVecRealloc, _glmZero ]
...

View file

@@ -1,421 +0,0 @@
/*
File: AGL/agl.h
Contains: Basic AGL data types, constants and function prototypes.
Version: Technology: Mac OS X
Release: GM
Copyright: (c) 2000-2010 Apple, Inc. All rights reserved.
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef _AGL_H
#define _AGL_H
#include <OpenGL/OpenGLAvailability.h>
#include <Carbon/Carbon.h>
#include <OpenGL/gl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
** AGL API version.
*/
#define AGL_VERSION_3_0 1
/*
** Macintosh device type.
*
* Note:
AGLDevice is a QuickDraw type; it has been deprecated. Use CGDirectDisplayID instead.
*/
typedef GDHandle AGLDevice OPENGL_DEPRECATED(10_0, 10_5);
/*
** Macintosh drawable type.
*
* Note:
AGLDrawable is a QuickDraw type; it has been deprecated. Use WindowRef or HIViewRef instead.
*/
typedef CGrafPtr AGLDrawable OPENGL_DEPRECATED(10_0, 10_5);
/*
** AGL opaque data.
*/
typedef struct __AGLRendererInfoRec *AGLRendererInfo OPENGL_DEPRECATED(10_0, 10_9);
typedef struct __AGLPixelFormatRec *AGLPixelFormat OPENGL_DEPRECATED(10_0, 10_9);
typedef struct __AGLContextRec *AGLContext OPENGL_DEPRECATED(10_0, 10_9);
typedef struct __AGLPBufferRec *AGLPbuffer OPENGL_DEPRECATED(10_0, 10_9);
/************************************************************************/
/*
** Attribute names for aglChoosePixelFormat and aglDescribePixelFormat.
*/
#define AGL_NONE 0
#define AGL_ALL_RENDERERS 1 /* choose from all available renderers */
#define AGL_BUFFER_SIZE 2 /* depth of the index buffer */
#define AGL_LEVEL 3 /* level in plane stacking */
#define AGL_RGBA 4 /* choose an RGBA format */
#define AGL_DOUBLEBUFFER 5 /* double buffering supported */
#define AGL_STEREO 6 /* stereo buffering supported */
#define AGL_AUX_BUFFERS 7 /* number of aux buffers */
#define AGL_RED_SIZE 8 /* number of red component bits */
#define AGL_GREEN_SIZE 9 /* number of green component bits */
#define AGL_BLUE_SIZE 10 /* number of blue component bits */
#define AGL_ALPHA_SIZE 11 /* number of alpha component bits */
#define AGL_DEPTH_SIZE 12 /* number of depth bits */
#define AGL_STENCIL_SIZE 13 /* number of stencil bits */
#define AGL_ACCUM_RED_SIZE 14 /* number of red accum bits */
#define AGL_ACCUM_GREEN_SIZE 15 /* number of green accum bits */
#define AGL_ACCUM_BLUE_SIZE 16 /* number of blue accum bits */
#define AGL_ACCUM_ALPHA_SIZE 17 /* number of alpha accum bits */
/*
** Extended attributes
*/
#define AGL_PIXEL_SIZE 50 /* frame buffer bits per pixel */
#define AGL_MINIMUM_POLICY 51 /* never choose smaller buffers than requested */
#define AGL_MAXIMUM_POLICY 52 /* choose largest buffers of type requested */
#define AGL_OFFSCREEN 53 /* choose an off-screen capable renderer */
#define AGL_FULLSCREEN 54 /* choose a full-screen capable renderer */
#define AGL_SAMPLE_BUFFERS_ARB 55 /* number of multi sample buffers */
#define AGL_SAMPLES_ARB 56 /* number of samples per multi sample buffer */
#define AGL_AUX_DEPTH_STENCIL 57 /* independent depth and/or stencil buffers for the aux buffer */
#define AGL_COLOR_FLOAT 58 /* color buffers store floating point pixels */
#define AGL_MULTISAMPLE 59 /* choose multisample */
#define AGL_SUPERSAMPLE 60 /* choose supersample */
#define AGL_SAMPLE_ALPHA 61 /* request alpha filtering */
/*
** Renderer management
*/
#define AGL_RENDERER_ID 70 /* request renderer by ID */
#define AGL_SINGLE_RENDERER 71 /* choose a single renderer for all screens */
#define AGL_NO_RECOVERY 72 /* disable all failure recovery systems */
#define AGL_ACCELERATED 73 /* choose a hardware accelerated renderer */
#define AGL_CLOSEST_POLICY 74 /* choose the closest color buffer to request */
#define AGL_ROBUST 75 /* renderer does not need failure recovery */
#define AGL_BACKING_STORE 76 /* back buffer contents are valid after swap */
#define AGL_MP_SAFE 78 /* renderer is multi-processor safe */
#define AGL_WINDOW 80 /* can be used to render to a window */
#define AGL_MULTISCREEN 81 /* single window can span multiple screens */
#define AGL_VIRTUAL_SCREEN 82 /* virtual screen number */
#define AGL_COMPLIANT 83 /* renderer is opengl compliant */
#define AGL_DISPLAY_MASK 84 /* mask limiting supported displays */
#define AGL_PBUFFER 90 /* can be used to render to a pbuffer */
#define AGL_REMOTE_PBUFFER 91 /* can be used to render offline to a pbuffer */
#define AGL_ALLOW_OFFLINE_RENDERERS 96 /* show offline renderers in pixel formats */
/*
** Property names for aglDescribeRenderer
*/
/* #define AGL_OFFSCREEN 53 */
/* #define AGL_FULLSCREEN 54 */
/* #define AGL_RENDERER_ID 70 */
/* #define AGL_ACCELERATED 73 */
/* #define AGL_ROBUST 75 */
/* #define AGL_BACKING_STORE 76 */
/* #define AGL_MP_SAFE 78 */
/* #define AGL_WINDOW 80 */
/* #define AGL_MULTISCREEN 81 */
/* #define AGL_COMPLIANT 83 */
/* #define AGL_PBUFFER 90 */
#define AGL_BUFFER_MODES 100
#define AGL_MIN_LEVEL 101
#define AGL_MAX_LEVEL 102
#define AGL_COLOR_MODES 103
#define AGL_ACCUM_MODES 104
#define AGL_DEPTH_MODES 105
#define AGL_STENCIL_MODES 106
#define AGL_MAX_AUX_BUFFERS 107
#define AGL_VIDEO_MEMORY 120
#define AGL_TEXTURE_MEMORY 121
#define AGL_RENDERER_COUNT 128
/*
** Integer parameter names
*/
#define AGL_SWAP_RECT 200 /* Enable or set the swap rectangle */
#define AGL_BUFFER_RECT 202 /* Enable or set the buffer rectangle */
#define AGL_SWAP_LIMIT 203 /* Enable or disable the swap async limit */
#define AGL_COLORMAP_TRACKING 210 /* Enable or disable colormap tracking */
#define AGL_COLORMAP_ENTRY 212 /* Set a colormap entry to {index, r, g, b} */
#define AGL_RASTERIZATION 220 /* Enable or disable all rasterization */
#define AGL_SWAP_INTERVAL 222 /* 0 -> Don't sync, 1 -> Sync to vertical retrace */
#define AGL_STATE_VALIDATION 230 /* Validate state for multi-screen functionality */
#define AGL_BUFFER_NAME 231 /* Set the buffer name. Allows for multi ctx to share a buffer */
#define AGL_ORDER_CONTEXT_TO_FRONT 232 /* Order the current context in front of all the other contexts. */
#define AGL_CONTEXT_SURFACE_ID 233 /* aglGetInteger only - returns the ID of the drawable surface for the context */
#define AGL_CONTEXT_DISPLAY_ID 234 /* aglGetInteger only - returns the display ID(s) of all displays touched by the context, up to a maximum of 32 displays */
#define AGL_SURFACE_ORDER 235 /* Position of OpenGL surface relative to window: 1 -> Above window, -1 -> Below Window */
#define AGL_SURFACE_OPACITY 236 /* Opacity of OpenGL surface: 1 -> Surface is opaque (default), 0 -> non-opaque */
/* NOTE: AGL_CLIP_REGION is DEPRECATED_IN_MAC_OS_X_VERSION_10_5_AND_LATER */
/* It will only work on drawable types for binary compatibility */
#define AGL_CLIP_REGION 254 /* Enable or set the drawable clipping region */
#define AGL_FS_CAPTURE_SINGLE 255 /* Enable the capture of only a single display for aglFullScreen, normally disabled */
#define AGL_SURFACE_BACKING_SIZE 304 /* 2 params. Width/height of surface backing size */
#define AGL_ENABLE_SURFACE_BACKING_SIZE 305 /* Enable or disable surface backing size override */
#define AGL_SURFACE_VOLATILE 306 /* Flag surface to candidate for deletion */
/*
** Option names for aglConfigure.
*/
#define AGL_FORMAT_CACHE_SIZE 501 /* Set the size of the pixel format cache */
#define AGL_CLEAR_FORMAT_CACHE 502 /* Reset the pixel format cache */
#define AGL_RETAIN_RENDERERS 503 /* Whether to retain loaded renderers in memory */
/* buffer_modes */
#define AGL_MONOSCOPIC_BIT 0x00000001
#define AGL_STEREOSCOPIC_BIT 0x00000002
#define AGL_SINGLEBUFFER_BIT 0x00000004
#define AGL_DOUBLEBUFFER_BIT 0x00000008
/* bit depths */
#define AGL_0_BIT 0x00000001
#define AGL_1_BIT 0x00000002
#define AGL_2_BIT 0x00000004
#define AGL_3_BIT 0x00000008
#define AGL_4_BIT 0x00000010
#define AGL_5_BIT 0x00000020
#define AGL_6_BIT 0x00000040
#define AGL_8_BIT 0x00000080
#define AGL_10_BIT 0x00000100
#define AGL_12_BIT 0x00000200
#define AGL_16_BIT 0x00000400
#define AGL_24_BIT 0x00000800
#define AGL_32_BIT 0x00001000
#define AGL_48_BIT 0x00002000
#define AGL_64_BIT 0x00004000
#define AGL_96_BIT 0x00008000
#define AGL_128_BIT 0x00010000
/* color modes */
#define AGL_RGB8_BIT 0x00000001 /* 8 rgb bit/pixel, RGB=7:0, inverse colormap */
#define AGL_RGB8_A8_BIT 0x00000002 /* 8-8 argb bit/pixel, A=7:0, RGB=7:0, inverse colormap */
#define AGL_BGR233_BIT 0x00000004 /* 8 rgb bit/pixel, B=7:6, G=5:3, R=2:0 */
#define AGL_BGR233_A8_BIT 0x00000008 /* 8-8 argb bit/pixel, A=7:0, B=7:6, G=5:3, R=2:0 */
#define AGL_RGB332_BIT 0x00000010 /* 8 rgb bit/pixel, R=7:5, G=4:2, B=1:0 */
#define AGL_RGB332_A8_BIT 0x00000020 /* 8-8 argb bit/pixel, A=7:0, R=7:5, G=4:2, B=1:0 */
#define AGL_RGB444_BIT 0x00000040 /* 16 rgb bit/pixel, R=11:8, G=7:4, B=3:0 */
#define AGL_ARGB4444_BIT 0x00000080 /* 16 argb bit/pixel, A=15:12, R=11:8, G=7:4, B=3:0 */
#define AGL_RGB444_A8_BIT 0x00000100 /* 8-16 argb bit/pixel, A=7:0, R=11:8, G=7:4, B=3:0 */
#define AGL_RGB555_BIT 0x00000200 /* 16 rgb bit/pixel, R=14:10, G=9:5, B=4:0 */
#define AGL_ARGB1555_BIT 0x00000400 /* 16 argb bit/pixel, A=15, R=14:10, G=9:5, B=4:0 */
#define AGL_RGB555_A8_BIT 0x00000800 /* 8-16 argb bit/pixel, A=7:0, R=14:10, G=9:5, B=4:0 */
#define AGL_RGB565_BIT 0x00001000 /* 16 rgb bit/pixel, R=15:11, G=10:5, B=4:0 */
#define AGL_RGB565_A8_BIT 0x00002000 /* 8-16 argb bit/pixel, A=7:0, R=15:11, G=10:5, B=4:0 */
#define AGL_RGB888_BIT 0x00004000 /* 32 rgb bit/pixel, R=23:16, G=15:8, B=7:0 */
#define AGL_ARGB8888_BIT 0x00008000 /* 32 argb bit/pixel, A=31:24, R=23:16, G=15:8, B=7:0 */
#define AGL_RGB888_A8_BIT 0x00010000 /* 8-32 argb bit/pixel, A=7:0, R=23:16, G=15:8, B=7:0 */
#define AGL_RGB101010_BIT 0x00020000 /* 32 rgb bit/pixel, R=29:20, G=19:10, B=9:0 */
#define AGL_ARGB2101010_BIT 0x00040000 /* 32 argb bit/pixel, A=31:30 R=29:20, G=19:10, B=9:0 */
#define AGL_RGB101010_A8_BIT 0x00080000 /* 8-32 argb bit/pixel, A=7:0 R=29:20, G=19:10, B=9:0 */
#define AGL_RGB121212_BIT 0x00100000 /* 48 rgb bit/pixel, R=35:24, G=23:12, B=11:0 */
#define AGL_ARGB12121212_BIT 0x00200000 /* 48 argb bit/pixel, A=47:36, R=35:24, G=23:12, B=11:0 */
#define AGL_RGB161616_BIT 0x00400000 /* 64 rgb bit/pixel, R=47:32, G=31:16, B=15:0 */
#define AGL_ARGB16161616_BIT 0x00800000 /* 64 argb bit/pixel, A=63:48, R=47:32, G=31:16, B=15:0 */
#define AGL_INDEX8_BIT 0x20000000 /* 8 bit color look up table (deprecated) */
#define AGL_INDEX16_BIT 0x40000000 /* 16 bit color look up table (deprecated) */
#define AGL_RGBFLOAT64_BIT 0x01000000 /* 64 rgb bit/pixel, half float */
#define AGL_RGBAFLOAT64_BIT 0x02000000 /* 64 argb bit/pixel, half float */
#define AGL_RGBFLOAT128_BIT 0x04000000 /* 128 rgb bit/pixel, ieee float */
#define AGL_RGBAFLOAT128_BIT 0x08000000 /* 128 argb bit/pixel, ieee float */
#define AGL_RGBFLOAT256_BIT 0x10000000 /* 256 rgb bit/pixel, ieee double */
#define AGL_RGBAFLOAT256_BIT 0x20000000 /* 256 argb bit/pixel, ieee double */
/*
** Error return values from aglGetError.
*/
#define AGL_NO_ERROR 0 /* no error */
#define AGL_BAD_ATTRIBUTE 10000 /* invalid pixel format attribute */
#define AGL_BAD_PROPERTY 10001 /* invalid renderer property */
#define AGL_BAD_PIXELFMT 10002 /* invalid pixel format */
#define AGL_BAD_RENDINFO 10003 /* invalid renderer info */
#define AGL_BAD_CONTEXT 10004 /* invalid context */
#define AGL_BAD_DRAWABLE 10005 /* invalid drawable */
#define AGL_BAD_GDEV 10006 /* invalid graphics device */
#define AGL_BAD_STATE 10007 /* invalid context state */
#define AGL_BAD_VALUE 10008 /* invalid numerical value */
#define AGL_BAD_MATCH 10009 /* invalid share context */
#define AGL_BAD_ENUM 10010 /* invalid enumerant */
#define AGL_BAD_OFFSCREEN 10011 /* invalid offscreen drawable */
#define AGL_BAD_FULLSCREEN 10012 /* invalid fullscreen drawable */
#define AGL_BAD_WINDOW 10013 /* invalid window */
#define AGL_BAD_POINTER 10014 /* invalid pointer */
#define AGL_BAD_MODULE 10015 /* invalid code module */
#define AGL_BAD_ALLOC 10016 /* memory allocation failure */
#define AGL_BAD_CONNECTION 10017 /* invalid CoreGraphics connection */
#define AGL_INVALID_FUNCTION 10018 /* invalid 64 bit function use */
/************************************************************************/
/*
** Pixel format functions
*
* Note:
aglDevicesOfPixelFormat has been deprecated; use aglDisplaysOfPixelFormat
*/
extern AGLPixelFormat aglChoosePixelFormat(const void *gdevs, GLint ndev, const GLint *attribs) OPENGL_DEPRECATED(10_0, 10_9);
extern AGLPixelFormat aglCreatePixelFormat(const GLint *attribs) OPENGL_DEPRECATED(10_5, 10_9);
extern void aglDestroyPixelFormat(AGLPixelFormat pix) OPENGL_DEPRECATED(10_0, 10_9);
extern AGLPixelFormat aglNextPixelFormat(AGLPixelFormat pix) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglDescribePixelFormat(AGLPixelFormat pix, GLint attrib, GLint *value) OPENGL_DEPRECATED(10_0, 10_9);
extern CGDirectDisplayID *aglDisplaysOfPixelFormat(AGLPixelFormat pix, GLint *ndevs) OPENGL_DEPRECATED(10_5, 10_9);
extern GDHandle *aglDevicesOfPixelFormat(AGLPixelFormat pix, GLint *ndevs) OPENGL_DEPRECATED(10_0, 10_5);
/*
** Renderer information functions
*
* Note:
aglQueryRendererInfo has been deprecated; use aglQueryRendererInfoForCGDirectDisplayIDs
*/
extern AGLRendererInfo aglQueryRendererInfoForCGDirectDisplayIDs(const CGDirectDisplayID *dspIDs, GLint ndev) OPENGL_DEPRECATED(10_5, 10_9);
extern void aglDestroyRendererInfo(AGLRendererInfo rend) OPENGL_DEPRECATED(10_0, 10_9);
extern AGLRendererInfo aglNextRendererInfo(AGLRendererInfo rend) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglDescribeRenderer(AGLRendererInfo rend, GLint prop, GLint *value) OPENGL_DEPRECATED(10_0, 10_9);
extern AGLRendererInfo aglQueryRendererInfo(const AGLDevice *gdevs, GLint ndev) OPENGL_DEPRECATED(10_0, 10_5);
/*
** Context functions
*/
extern AGLContext aglCreateContext(AGLPixelFormat pix, AGLContext share) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglDestroyContext(AGLContext ctx) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglCopyContext(AGLContext src, AGLContext dst, GLuint mask) OPENGL_DEPRECATED(10_0, 10_8);
extern GLboolean aglUpdateContext(AGLContext ctx) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Current state functions
*/
extern GLboolean aglSetCurrentContext(AGLContext ctx) OPENGL_DEPRECATED(10_0, 10_9);
extern AGLContext aglGetCurrentContext(void) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Drawable Functions
*
* Note:
aglSetDrawable / aglGetDrawable have been deprecated; use aglGetWindowRef or aglSetHIViewRef
*/
extern GLboolean aglSetDrawable(AGLContext ctx, AGLDrawable draw) OPENGL_DEPRECATED(10_0, 10_5);
extern AGLDrawable aglGetDrawable(AGLContext ctx) OPENGL_DEPRECATED(10_0, 10_5);
/*
** WindowRef Functions
*/
extern GLboolean aglSetWindowRef(AGLContext ctx, WindowRef window) OPENGL_DEPRECATED(10_5, 10_9);
extern WindowRef aglGetWindowRef(AGLContext ctx) OPENGL_DEPRECATED(10_5, 10_9);
/*
** HIViewRef Functions
*/
extern GLboolean aglSetHIViewRef(AGLContext ctx, HIViewRef hiview) OPENGL_DEPRECATED(10_5, 10_9);
extern HIViewRef aglGetHIViewRef(AGLContext ctx) OPENGL_DEPRECATED(10_5, 10_9);
/*
** OffScreen buffer Function
*/
extern GLboolean aglSetOffScreen(AGLContext ctx, GLsizei width, GLsizei height, GLsizei rowbytes, GLvoid *baseaddr) OPENGL_DEPRECATED(10_0, 10_7);
/*
** FullScreen Function
*/
extern GLboolean aglSetFullScreen(AGLContext ctx, GLsizei width, GLsizei height, GLsizei freq, GLint device) OPENGL_DEPRECATED(10_0, 10_6);
/*
** Virtual screen functions
*/
extern GLboolean aglSetVirtualScreen(AGLContext ctx, GLint screen) OPENGL_DEPRECATED(10_0, 10_9);
extern GLint aglGetVirtualScreen(AGLContext ctx) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Obtain version numbers
*/
extern void aglGetVersion(GLint *major, GLint *minor) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Global library options
*/
extern GLboolean aglConfigure(GLenum pname, GLuint param) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Swap functions
*/
extern void aglSwapBuffers(AGLContext ctx) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Per context options
*/
extern GLboolean aglEnable(AGLContext ctx, GLenum pname) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglDisable(AGLContext ctx, GLenum pname) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglIsEnabled(AGLContext ctx, GLenum pname) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglSetInteger(AGLContext ctx, GLenum pname, const GLint *params) OPENGL_DEPRECATED(10_0, 10_9);
extern GLboolean aglGetInteger(AGLContext ctx, GLenum pname, GLint *params) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Font function
*
* Note:
* aglUseFont has been deprecated, no replacement available
*/
extern GLboolean aglUseFont(AGLContext ctx, GLint fontID, Style face, GLint size, GLint first, GLint count, GLint base) OPENGL_DEPRECATED(10_0, 10_5);
/*
** Error functions
*/
extern GLenum aglGetError(void) OPENGL_DEPRECATED(10_0, 10_9);
extern const GLubyte *aglErrorString(GLenum code) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Soft reset function
*/
extern void aglResetLibrary(void) OPENGL_DEPRECATED(10_0, 10_9);
/*
** Surface texture function
*
* Note:
* aglSurfaceTexture has been deprecated, use GL_EXT_framebuffer_object instead
*/
extern void aglSurfaceTexture (AGLContext context, GLenum target, GLenum internalformat, AGLContext surfacecontext) OPENGL_DEPRECATED(10_2, 10_5);
/*
** PBuffer functions
*
* Note:
* PBuffers have been deprecated, use GL_EXT_framebuffer_object instead
*/
extern GLboolean aglCreatePBuffer (GLint width, GLint height, GLenum target, GLenum internalFormat, GLint max_level, AGLPbuffer *pbuffer) OPENGL_DEPRECATED(10_3, 10_7);
extern GLboolean aglDestroyPBuffer (AGLPbuffer pbuffer) OPENGL_DEPRECATED(10_3, 10_7);
extern GLboolean aglDescribePBuffer (AGLPbuffer pbuffer, GLint *width, GLint *height, GLenum *target, GLenum *internalFormat, GLint *max_level) OPENGL_DEPRECATED(10_3, 10_7);
extern GLboolean aglTexImagePBuffer (AGLContext ctx, AGLPbuffer pbuffer, GLint source) OPENGL_DEPRECATED(10_3, 10_7);
/*
** Pbuffer Drawable Functions
*/
extern GLboolean aglSetPBuffer (AGLContext ctx, AGLPbuffer pbuffer, GLint face, GLint level, GLint screen) OPENGL_DEPRECATED(10_3, 10_7);
extern GLboolean aglGetPBuffer (AGLContext ctx, AGLPbuffer *pbuffer, GLint *face, GLint *level, GLint *screen) OPENGL_DEPRECATED(10_3, 10_7);
/*
** CGL functions
*/
extern GLboolean aglGetCGLContext(AGLContext ctx, void **cgl_ctx) OPENGL_DEPRECATED(10_4, 10_9);
extern GLboolean aglGetCGLPixelFormat(AGLPixelFormat pix, void **cgl_pix) OPENGL_DEPRECATED(10_4, 10_9);
#ifdef __cplusplus
}
#endif
#endif /* _AGL_H */
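A minimal sketch (untested, using only the deprecated functions declared above): create a pixel format, a context, and make it current. The attribute choice here is illustrative.

#include <AGL/agl.h>
#include <stdio.h>

static AGLContext createBasicContext(void) {
    /* RGBA, double-buffered, 16-bit depth; list is terminated by AGL_NONE. */
    GLint attribs[] = { AGL_RGBA, AGL_DOUBLEBUFFER, AGL_DEPTH_SIZE, 16, AGL_NONE };
    AGLPixelFormat pix = aglCreatePixelFormat(attribs);
    if (pix == NULL) {
        printf("AGL error: %s\n", (const char *)aglErrorString(aglGetError()));
        return NULL;
    }
    AGLContext ctx = aglCreateContext(pix, NULL);  /* NULL: no shared context */
    aglDestroyPixelFormat(pix);                    /* safe once the context exists */
    if (ctx != NULL && aglSetCurrentContext(ctx)) {
        GLint major = 0, minor = 0;
        aglGetVersion(&major, &minor);
        printf("AGL version %d.%d\n", (int)major, (int)minor);
    }
    return ctx;
}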

View file

@@ -1,48 +0,0 @@
/*
File: AGL/aglContext.h
Contains: Data type for internal contexts, for use with internal renderer interface.
Version: Technology: Mac OS 9
Release: GM
Copyright: (c) 2000-2010 Apple, Inc. All rights reserved.
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef _AGLCONTEXT_H
#define _AGLCONTEXT_H
#include <OpenGL/OpenGLAvailability.h>
#include <OpenGL/gliContext.h>
#include <OpenGL/gliDispatch.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
** Opaque declaration for private AGLContext data.
*/
typedef struct __AGLPrivateRec *AGLPrivate OPENGL_DEPRECATED(10_0, 10_9);
/*
** AGLContext structure.
*/
struct __AGLContextRec {
GLIContext rend;
GLIFunctionDispatch disp;
AGLPrivate priv;
} OPENGL_DEPRECATED(10_0, 10_9);
#ifdef __cplusplus
}
#endif
#endif /* _AGLCONTEXT_H */

View file

@@ -1,48 +0,0 @@
/*
File: AGL/aglRenderers.h
Contains: Constant values for built-in AGL renderers.
Version: Technology: Mac OS X
Release: GM
Copyright: (c) 2000-2010 Apple, Inc. All rights reserved.
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef _AGLRENDERERS_H
#define _AGLRENDERERS_H
/*
** Renderer ID numbers
*/
#define AGL_RENDERER_GENERIC_ID 0x00020200
#define AGL_RENDERER_GENERIC_FLOAT_ID 0x00020400
#define AGL_RENDERER_APPLE_SW_ID 0x00020600
#define AGL_RENDERER_ATI_RAGE_128_ID 0x00021000
#define AGL_RENDERER_ATI_RADEON_ID 0x00021200
#define AGL_RENDERER_ATI_RAGE_PRO_ID 0x00021400
#define AGL_RENDERER_ATI_RADEON_8500_ID 0x00021600
#define AGL_RENDERER_ATI_RADEON_9700_ID 0x00021800
#define AGL_RENDERER_ATI_RADEON_X1000_ID 0x00021900
#define AGL_RENDERER_ATI_RADEON_X2000_ID 0x00021A00
#define AGL_RENDERER_NVIDIA_GEFORCE_2MX_ID 0x00022000 /* also for GeForce 4MX */
#define AGL_RENDERER_NVIDIA_GEFORCE_3_ID 0x00022200 /* also for GeForce 4 Ti */
#define AGL_RENDERER_NVIDIA_GEFORCE_FX_ID 0x00022400 /* also for GeForce 6xxx, 7xxx */
#define AGL_RENDERER_NVIDIA_GEFORCE_8XXX_ID 0x00022600 /* also for GeForce 9xxx */
#define AGL_RENDERER_VT_BLADE_XP2_ID 0x00023000
#define AGL_RENDERER_INTEL_900_ID 0x00024000
#define AGL_RENDERER_INTEL_X3100_ID 0x00024200
#define AGL_RENDERER_MESA_3DFX_ID 0x00040000
/* deprecated for Mac OS X, use above instead for specific renderer */
/* AGL_RENDERER_ATI_ID */
/* AGL_RENDERER_NVIDIA_ID */
/* AGL_RENDERER_FORMAC_ID */
/* AGL_RENDERER_3DFX_ID */
#endif /* _AGLRENDERERS_H */
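As a sketch of how these IDs are typically consumed (assuming the query functions from agl.h above), walk the renderer-info list for the main display and compare AGL_RENDERER_ID against the constants:

#include <AGL/agl.h>
#include <AGL/aglRenderers.h>
#include <ApplicationServices/ApplicationServices.h>
#include <stdio.h>

static void listRenderers(void) {
    CGDirectDisplayID display = CGMainDisplayID();
    AGLRendererInfo head = aglQueryRendererInfoForCGDirectDisplayIDs(&display, 1);
    for (AGLRendererInfo r = head; r != NULL; r = aglNextRendererInfo(r)) {
        GLint rendererID = 0, accelerated = 0;
        aglDescribeRenderer(r, AGL_RENDERER_ID, &rendererID);
        aglDescribeRenderer(r, AGL_ACCELERATED, &accelerated);
        printf("renderer 0x%08x%s%s\n", (unsigned)rendererID,
               accelerated ? " (hardware)" : " (software)",
               rendererID == AGL_RENDERER_GENERIC_FLOAT_ID ? " [generic float]" : "");
    }
    if (head != NULL)
        aglDestroyRendererInfo(head);  /* frees the whole list */
}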

View file

@@ -1,137 +0,0 @@
/*
File: AGL/glm.h
Contains: Basic GLMemoryLibrary data types, constants and function prototypes.
Version: Technology: Mac OS X
Release: GM
Copyright: (c) 2000-2010 Apple, Inc. All rights reserved.
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef _GLM_H
#define _GLM_H
#include <OpenGL/OpenGLAvailability.h>
#include <OpenGL/gl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
** Interface version
*/
#define GLM_VERSION_2_0 1
/*
** Mode types - glmSetMode
*/
#define GLM_OVERRIDE_MODE 0x0001
#define GLM_SYSTEM_HEAP_MODE 0x0002
#define GLM_APPLICATION_HEAP_MODE 0x0003
#define GLM_MULTIPROCESSOR_MODE 0x0004
/*
** Function types - glmSetFunc
*/
#define GLM_PAGE_ALLOCATION_FUNC_PTR 0x0001
#define GLM_PAGE_FREE_FUNC_PTR 0x0002
#define GLM_ZERO_FUNC_PTR 0x0003
#define GLM_COPY_FUNC_PTR 0x0004
#define GLM_SET_UBYTE_FUNC_PTR 0x0005
#define GLM_SET_USHORT_FUNC_PTR 0x0006
#define GLM_SET_UINT_FUNC_PTR 0x0007
#define GLM_SET_DOUBLE_FUNC_PTR 0x0008
/*
** Integer types - glmSetInteger
*/
#define GLM_PAGE_SIZE 0x0001
/*
** Integer types - glmGetInteger
*/
/*#define GLM_PAGE_SIZE 0x0001*/
#define GLM_NUMBER_PAGES 0x0002
#define GLM_CURRENT_MEMORY 0x0003
#define GLM_MAXIMUM_MEMORY 0x0004
/*
** Integer types - glmGetError
*/
#define GLM_NO_ERROR 0
#define GLM_INVALID_ENUM 0x0001
#define GLM_INVALID_VALUE 0x0002
#define GLM_INVALID_OPERATION 0x0003
#define GLM_OUT_OF_MEMORY 0x0004
/*
** Function pointer types
*/
typedef GLvoid *(*GLMPageAllocFunc)(GLsizei size) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMPageFreeFunc)(GLvoid *ptr) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMZeroFunc)(GLubyte *buffer, GLsizei width, GLsizei height, GLsizei skip) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMCopyFunc)(const GLubyte *src, GLubyte *dst, GLsizei width, GLsizei height, GLsizei src_skip, GLsizei dst_skip) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMSetUByteFunc)(GLubyte *buffer, GLsizei width, GLsizei height, GLsizei skip, GLubyte value) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMSetUShortFunc)(GLushort *buffer, GLsizei width, GLsizei height, GLsizei skip, GLushort value) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMSetUIntFunc)(GLuint *buffer, GLsizei width, GLsizei height, GLsizei skip, GLuint value) OPENGL_DEPRECATED(10_2, 10_9);
typedef void (*GLMSetDoubleFunc)(GLdouble *buffer, GLsizei width, GLsizei height, GLsizei skip, GLdouble value) OPENGL_DEPRECATED(10_2, 10_9);
typedef union {
GLMPageAllocFunc page_alloc_func;
GLMPageFreeFunc page_free_func;
GLMZeroFunc zero_func;
GLMCopyFunc copy_func;
GLMSetUByteFunc set_ubyte_func;
GLMSetUShortFunc set_ushort_func;
GLMSetUIntFunc set_uint_func;
GLMSetDoubleFunc set_double_func;
} GLMfunctions OPENGL_DEPRECATED(10_2, 10_9);
/*
** Prototypes
*/
extern void glmSetMode(GLenum mode) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmSetFunc(GLenum type, GLMfunctions func) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmSetInteger(GLenum param, GLint value) OPENGL_DEPRECATED(10_2, 10_9);
extern GLint glmGetInteger(GLenum param) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmPageFreeAll(void) OPENGL_DEPRECATED(10_2, 10_9);
extern GLvoid *glmMalloc(GLsizei size) OPENGL_DEPRECATED(10_2, 10_9);
extern GLvoid *glmCalloc(GLsizei nmemb, GLsizei size) OPENGL_DEPRECATED(10_2, 10_9);
extern GLvoid *glmRealloc(GLvoid *ptr, GLsizei size) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmFree(GLvoid *ptr) OPENGL_DEPRECATED(10_2, 10_9);
/* 16 byte aligned */
extern GLvoid *glmVecAlloc(GLsizei size) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmVecFree(GLvoid *ptr) OPENGL_DEPRECATED(10_2, 10_9);
/* 32 byte aligned and 32 byte padded */
extern GLvoid *glmDCBAlloc(GLsizei size) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmDCBFree(GLvoid *ptr) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmZero(GLubyte *buffer, GLsizei width, GLsizei height, GLsizei rowbytes) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmCopy(const GLubyte *src, GLubyte *dst, GLsizei width, GLsizei height, GLsizei src_rowbytes, GLsizei dst_rowbytes) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmSetUByte(GLubyte *buffer, GLsizei width, GLsizei height, GLsizei row_elems, GLubyte value) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmSetUShort(GLushort *buffer, GLsizei width, GLsizei height, GLsizei row_elems, GLushort value) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmSetUInt(GLuint *buffer, GLsizei width, GLsizei height, GLsizei row_elems, GLuint value) OPENGL_DEPRECATED(10_2, 10_9);
extern void glmSetDouble(GLdouble *buffer, GLsizei width, GLsizei height, GLsizei row_elems, GLdouble value) OPENGL_DEPRECATED(10_2, 10_9);
extern GLenum glmGetError(void) OPENGL_DEPRECATED(10_2, 10_9);
#ifdef __cplusplus
}
#endif
#endif /* _GLM_H */
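A short sketch of the allocation routines above. Treating glmZero's width as a byte count is an assumption here; with single-byte elements, bytes and elements coincide.

#include <AGL/glm.h>
#include <stdio.h>

static void demoGLM(void) {
    GLsizei width = 64, height = 64;                 /* one byte per element */
    GLubyte *pixels = (GLubyte *)glmMalloc(width * height);
    if (pixels == NULL) {
        printf("glm error 0x%04x\n", (unsigned)glmGetError());
        return;
    }
    glmZero(pixels, width, height, width);           /* rowbytes == width: tightly packed */
    glmSetUByte(pixels, width, height, width, 0x7F); /* fill every element with 0x7F */
    glmFree(pixels);
}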

View file

@@ -1,25 +0,0 @@
framework module AGL [extern_c] {
header "agl.h"
export *
explicit module Context {
header "aglContext.h"
export *
}
explicit module Macro {
header "aglMacro.h"
export *
}
explicit module Renderers {
header "aglRenderers.h"
export *
}
explicit module GLM {
header "glm.h"
export *
}
}
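With Clang modules enabled (-fmodules), this map lets a client import either the umbrella module or an explicit submodule; a hypothetical client:

// agl_client.m - compile with -fmodules (hypothetical client of the map above).
@import AGL;            // agl.h, re-exported per "export *"
@import AGL.Renderers;  // explicit submodule: aglRenderers.h

int main(void) {
    return AGL_NO_ERROR;  // constants from agl.h are visible via the module
}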

View file

@@ -1 +0,0 @@
Versions/Current/AVFoundation.tbd

View file

@@ -1 +0,0 @@
Versions/Current/Frameworks

View file

@@ -1 +0,0 @@
Versions/Current/Headers

View file

@@ -1 +0,0 @@
Versions/Current/Modules

View file

@@ -1,46 +0,0 @@
--- !tapi-tbd-v2
archs: [ i386, x86_64 ]
uuids: [ 'i386: CE7BBB12-8634-3E8B-8B85-652815F43BBE', 'x86_64: 42633BC1-2DDA-3721-84A2-1506808C8525' ]
platform: macosx
install-name: /System/Library/Frameworks/AVFoundation.framework/Versions/A/Frameworks/AVFAudio.framework/Versions/A/AVFAudio
objc-constraint: none
exports:
- archs: [ x86_64 ]
objc-ivars: [ _AVAudioBuffer._impl, _AVAudioEndpointDetector._impl, _AVAudioNode._impl,
_AVAudioSequencer._impl, _AVAudioUnitComponent._impl, _AVAudioUnitComponentManager._impl,
_AVMIDIPlayer._impl, _AVMusicTrack._impl ]
- archs: [ i386, x86_64 ]
symbols: [ _AVAudioBitRateStrategy_Constant, _AVAudioBitRateStrategy_LongTermAverage,
_AVAudioBitRateStrategy_Variable, _AVAudioBitRateStrategy_VariableConstrained,
_AVAudioEngineConfigurationChangeNotification, _AVAudioFileTypeKey,
_AVAudioUnitComponentTagsDidChangeNotification, _AVAudioUnitManufacturerNameApple,
_AVAudioUnitTypeEffect, _AVAudioUnitTypeFormatConverter, _AVAudioUnitTypeGenerator,
_AVAudioUnitTypeMIDIProcessor, _AVAudioUnitTypeMixer, _AVAudioUnitTypeMusicDevice,
_AVAudioUnitTypeMusicEffect, _AVAudioUnitTypeOfflineEffect,
_AVAudioUnitTypeOutput, _AVAudioUnitTypePanner, _AVChannelLayoutKey,
_AVEncoderAudioQualityForVBRKey, _AVEncoderAudioQualityKey,
_AVEncoderBitDepthHintKey, _AVEncoderBitRateKey, _AVEncoderBitRatePerChannelKey,
_AVEncoderBitRateStrategyKey, _AVFormatIDKey, _AVLinearPCMBitDepthKey,
_AVLinearPCMIsBigEndianKey, _AVLinearPCMIsFloatKey, _AVLinearPCMIsNonInterleaved,
_AVNumberOfChannelsKey, _AVSampleRateConverterAlgorithmKey,
_AVSampleRateConverterAlgorithm_Mastering, _AVSampleRateConverterAlgorithm_MinimumPhase,
_AVSampleRateConverterAlgorithm_Normal, _AVSampleRateConverterAudioQualityKey,
_AVSampleRateKey, _AVVoiceActivationDeviceIDKey, _AVVoiceActivationModeKey,
_AVVoiceControllerMetricAudioSessionSetActiveTime, _AVVoiceControllerMetricAudioSessionSetInactiveTime,
_AVVoiceControllerMetricDataBeginHostTime, _AVVoiceControllerMetricDataDurationMilliSeconds,
_AVVoiceControllerMetricDataEndHostTime, _AVVoiceControllerNoRecordRoute ]
objc-classes: [ _AVAudioBuffer, _AVAudioChannelLayout, _AVAudioCompressedBuffer,
_AVAudioConnectionPoint, _AVAudioConverter, _AVAudioEndpointDetector,
_AVAudioEngine, _AVAudioEnvironmentDistanceAttenuationParameters,
_AVAudioEnvironmentNode, _AVAudioEnvironmentReverbParameters,
_AVAudioFile, _AVAudioFormat, _AVAudioIONode, _AVAudioInputNode,
_AVAudioMixerNode, _AVAudioMixingDestination, _AVAudioNode,
_AVAudioOutputNode, _AVAudioPCMBuffer, _AVAudioPlayer, _AVAudioPlayerNode,
_AVAudioRecorder, _AVAudioSequencer, _AVAudioSession, _AVAudioTime,
_AVAudioUnit, _AVAudioUnitComponent, _AVAudioUnitComponentManager,
_AVAudioUnitDelay, _AVAudioUnitDistortion, _AVAudioUnitEQ,
_AVAudioUnitEQFilterParameters, _AVAudioUnitEffect, _AVAudioUnitGenerator,
_AVAudioUnitMIDIInstrument, _AVAudioUnitReverb, _AVAudioUnitSampler,
_AVAudioUnitTimeEffect, _AVAudioUnitTimePitch, _AVAudioUnitVarispeed,
_AVMIDIPlayer, _AVMusicTrack, _AVVCAudioBuffer, _AVVoiceController ]
...

View file

@@ -1,233 +0,0 @@
/*
File: AVAudioBuffer.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAudioFormat;
/*!
@class AVAudioBuffer
@abstract A buffer of audio data, with a format.
@discussion
AVAudioBuffer represents a buffer of audio data and its format.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioBuffer : NSObject <NSCopying, NSMutableCopying> {
@protected
void *_impl;
}
/*!
@property format
@abstract The format of the audio in the buffer.
*/
@property (nonatomic, readonly) AVAudioFormat *format;
/*! @property audioBufferList
@abstract The buffer's underlying AudioBufferList.
@discussion
For compatibility with lower-level CoreAudio and AudioToolbox APIs, this method accesses
the buffer implementation's internal AudioBufferList. The buffer list structure must
not be modified, though you may modify buffer contents.
The mDataByteSize fields of this AudioBufferList express the buffer's current frameLength.
*/
@property (nonatomic, readonly) const AudioBufferList *audioBufferList;
/*! @property mutableAudioBufferList
@abstract A mutable version of the buffer's underlying AudioBufferList.
@discussion
Some lower-level CoreAudio and AudioToolbox APIs require a mutable AudioBufferList,
for example, AudioConverterConvertComplexBuffer.
The mDataByteSize fields of this AudioBufferList express the buffer's current frameCapacity.
If they are altered, you should modify the buffer's frameLength to match.
*/
@property (nonatomic, readonly) AudioBufferList *mutableAudioBufferList;
@end
// -------------------------------------------------------------------------------------------------
/*!
@class AVAudioPCMBuffer
@abstract A subclass of AVAudioBuffer for use with PCM audio formats.
@discussion
AVAudioPCMBuffer provides a number of methods useful for manipulating buffers of
audio in PCM format.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioPCMBuffer : AVAudioBuffer
/*! @method initWithPCMFormat:frameCapacity:
@abstract Initialize a buffer that is to contain PCM audio samples.
@param format
The format of the PCM audio to be contained in the buffer.
@param frameCapacity
The capacity of the buffer in PCM sample frames.
@discussion
An exception is raised if the format is not PCM.
Returns nil in the following cases:
- if the format has zero bytes per frame (format.streamDescription->mBytesPerFrame == 0)
- if the buffer byte capacity (frameCapacity * format.streamDescription->mBytesPerFrame)
cannot be represented by an uint32_t
*/
- (nullable instancetype)initWithPCMFormat:(AVAudioFormat *)format frameCapacity:(AVAudioFrameCount)frameCapacity NS_DESIGNATED_INITIALIZER;
/*! @property frameCapacity
@abstract
The buffer's capacity, in audio sample frames.
*/
@property (nonatomic, readonly) AVAudioFrameCount frameCapacity;
/*! @property frameLength
@abstract The current number of valid sample frames in the buffer.
@discussion
You may modify the length of the buffer as part of an operation that modifies its contents.
The length must be less than or equal to the frameCapacity. Modifying frameLength will update
the mDataByteSize in each of the underlying AudioBufferList's AudioBuffer's correspondingly,
and vice versa. Note that in the case of deinterleaved formats, mDataByteSize refers to
the size of one channel's worth of audio samples.
*/
@property (nonatomic) AVAudioFrameCount frameLength;
/*! @property stride
@abstract The buffer's number of interleaved channels.
@discussion
Useful in conjunction with floatChannelData etc.
*/
@property (nonatomic, readonly) NSUInteger stride;
/*! @property floatChannelData
@abstract Access the buffer's float audio samples.
@discussion
floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
32-bit float, or nil if it is another format.
The returned pointer is to format.channelCount pointers to float. Each of these pointers
is to "frameLength" valid samples, which are spaced by "stride" samples.
If format.interleaved is false (as with the standard deinterleaved float format), then
the pointers will be to separate chunks of memory. "stride" is 1.
If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
samples, each offset by 1 frame. "stride" is the number of interleaved channels.
*/
@property (nonatomic, readonly) float * __nonnull const * __nullable floatChannelData;
/*! @property int16ChannelData
@abstract Access the buffer's int16_t audio samples.
@discussion
int16ChannelData returns the buffer's audio samples if the buffer's format has 2-byte
integer samples, or nil if it is another format.
See the discussion of floatChannelData.
*/
@property (nonatomic, readonly) int16_t * __nonnull const * __nullable int16ChannelData;
/*! @property int32ChannelData
@abstract Access the buffer's int32_t audio samples.
@discussion
int32ChannelData returns the buffer's audio samples if the buffer's format has 4-byte
integer samples, or nil if it is another format.
See the discussion of floatChannelData.
*/
@property (nonatomic, readonly) int32_t * __nonnull const * __nullable int32ChannelData;
@end
// -------------------------------------------------------------------------------------------------
/*!
@class AVAudioCompressedBuffer
@abstract A subclass of AVAudioBuffer for use with compressed audio formats.
*/
NS_CLASS_AVAILABLE(10_11, 9_0)
@interface AVAudioCompressedBuffer : AVAudioBuffer
/*! @method initWithFormat:packetCapacity:maximumPacketSize:
@abstract Initialize a buffer that is to contain compressed audio data.
@param format
The format of the audio to be contained in the buffer.
@param packetCapacity
The capacity of the buffer in packets.
@param maximumPacketSize
The maximum size in bytes of a compressed packet.
The maximum packet size can be obtained from the maximumOutputPacketSize property of an AVAudioConverter configured for encoding this format.
@discussion
An exception is raised if the format is PCM.
*/
- (instancetype)initWithFormat:(AVAudioFormat *)format packetCapacity:(AVAudioPacketCount)packetCapacity maximumPacketSize:(NSInteger)maximumPacketSize;
/*! @method initWithFormat:packetCapacity:
@abstract Initialize a buffer that is to contain constant bytes per packet compressed audio data.
@param format
The format of the audio to be contained in the buffer.
@param packetCapacity
The capacity of the buffer in packets.
@discussion
This fails if the format is PCM or if the format has variable bytes per packet (format.streamDescription->mBytesPerPacket == 0).
*/
- (instancetype)initWithFormat:(AVAudioFormat *)format packetCapacity:(AVAudioPacketCount)packetCapacity;
/*! @property packetCapacity
@abstract
The number of compressed packets the buffer can contain.
*/
@property (nonatomic, readonly) AVAudioPacketCount packetCapacity;
/*! @property packetCount
@abstract The current number of compressed packets in the buffer.
@discussion
You may modify the packet length as part of an operation that modifies its contents.
The packet length must be less than or equal to the packetCapacity.
*/
@property (nonatomic) AVAudioPacketCount packetCount;
/*! @property maximumPacketSize
@abstract The maximum size of a compressed packet in bytes.
*/
@property (nonatomic, readonly) NSInteger maximumPacketSize;
/*! @property data
@abstract Access the buffer's data bytes.
*/
@property (nonatomic, readonly) void *data;
/*!
@property byteCapacity
@abstract The buffer's capacity in bytes
*/
@property (nonatomic, readonly) uint32_t byteCapacity API_AVAILABLE(macosx(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*!
@property byteLength
@abstract The current number of valid bytes in the buffer.
@discussion
Can be changed as part of an operation that modifies the contents.
*/
@property (nonatomic) uint32_t byteLength API_AVAILABLE(macosx(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property packetDescriptions
@abstract Access the buffer's array of packet descriptions, if any.
@discussion
If the format has constant bytes per packet (format.streamDescription->mBytesPerPacket != 0), then this will return nil.
*/
@property (nonatomic, readonly, nullable) AudioStreamPacketDescription *packetDescriptions;
@end
NS_ASSUME_NONNULL_END
// -------------------------------------------------------------------------------------------------
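A minimal sketch of the AVAudioPCMBuffer interface documented above, assuming AVAudioFormat's standard initializer (deinterleaved 32-bit float):

#import <AVFAudio/AVAudioBuffer.h>
#import <AVFAudio/AVAudioFormat.h>

static AVAudioPCMBuffer *makeSilenceBuffer(void) {
    AVAudioFormat *fmt = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0
                                                                        channels:2];
    AVAudioPCMBuffer *buf = [[AVAudioPCMBuffer alloc] initWithPCMFormat:fmt
                                                          frameCapacity:1024];
    if (buf == nil)
        return nil;                       // e.g. zero bytes per frame
    buf.frameLength = buf.frameCapacity;  // mark every frame valid
    // Standard format is deinterleaved float, so stride == 1 and each
    // channel is a separate chunk of memory.
    for (AVAudioChannelCount ch = 0; ch < fmt.channelCount; ch++) {
        float *samples = buf.floatChannelData[ch];
        for (AVAudioFrameCount i = 0; i < buf.frameLength; i++)
            samples[i] = 0.0f;
    }
    return buf;
}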

View file

@@ -1,85 +0,0 @@
/*
File: AVAudioChannelLayout.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#import <CoreAudio/CoreAudioTypes.h>
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAudioChannelLayout
@abstract A description of the roles of a set of audio channels.
@discussion
This object is a thin wrapper for the AudioChannelLayout structure, described
in <CoreAudio/CoreAudioTypes.h>.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioChannelLayout : NSObject <NSSecureCoding> {
@private
AudioChannelLayoutTag _layoutTag;
AudioChannelLayout * _layout;
void *_reserved;
}
- (instancetype)init NS_UNAVAILABLE;
/*! @method initWithLayoutTag:
@abstract Initialize from a layout tag.
@param layoutTag
The tag.
@discussion
Returns nil if the tag is either kAudioChannelLayoutTag_UseChannelDescriptions or
kAudioChannelLayoutTag_UseChannelBitmap.
*/
- (nullable instancetype)initWithLayoutTag:(AudioChannelLayoutTag)layoutTag;
/*! @method initWithLayout:
@abstract Initialize from an AudioChannelLayout.
@param layout
The AudioChannelLayout.
@discussion
If the provided layout's tag is kAudioChannelLayoutTag_UseChannelDescriptions, this
initializer attempts to convert it to a more specific tag.
*/
- (instancetype)initWithLayout:(const AudioChannelLayout *)layout NS_DESIGNATED_INITIALIZER;
/*! @method isEqual:
@abstract Determine whether another AVAudioChannelLayout is exactly equal to this layout.
@param object
The AVAudioChannelLayout to compare against.
@discussion
The underlying AudioChannelLayoutTag and AudioChannelLayout are compared for equality.
*/
- (BOOL)isEqual:(id)object;
/*! @method layoutWithLayoutTag:
@abstract Create from a layout tag.
*/
+ (instancetype)layoutWithLayoutTag:(AudioChannelLayoutTag)layoutTag;
/*! @method layoutWithLayout:
@abstract Create from an AudioChannelLayout
*/
+ (instancetype)layoutWithLayout:(const AudioChannelLayout *)layout;
/*! @property layoutTag
@abstract The layout's tag. */
@property (nonatomic, readonly) AudioChannelLayoutTag layoutTag;
/*! @property layout
@abstract The underlying AudioChannelLayout. */
@property (nonatomic, readonly) const AudioChannelLayout *layout;
/*! @property channelCount
@abstract The number of channels of audio data.
*/
@property (nonatomic, readonly) AVAudioChannelCount channelCount;
@end
NS_ASSUME_NONNULL_END
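A brief sketch using the tag-based factory above (kAudioChannelLayoutTag_MPEG_5_1_A comes from CoreAudioTypes.h):

#import <AVFAudio/AVAudioChannelLayout.h>

static void inspectLayout(void) {
    AVAudioChannelLayout *layout =
        [AVAudioChannelLayout layoutWithLayoutTag:kAudioChannelLayoutTag_MPEG_5_1_A];
    // 5.1 -> six channels; the underlying AudioChannelLayout is also exposed.
    NSLog(@"tag=%u channels=%u", (unsigned)layout.layoutTag, (unsigned)layout.channelCount);
}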

View file

@@ -1,54 +0,0 @@
/*
File: AVAudioConnectionPoint.h
Framework: AVFoundation
Copyright (c) 2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAudioNode;
/*! @class AVAudioConnectionPoint
@abstract A representation of either a source or destination connection point in AVAudioEngine.
@discussion
AVAudioConnectionPoint describes either a source or destination connection point (node, bus)
in AVAudioEngine's graph.
Instances of this class are immutable.
*/
NS_CLASS_AVAILABLE(10_11, 9_0)
@interface AVAudioConnectionPoint : NSObject {
@private
AVAudioNode *_node;
AVAudioNodeBus _bus;
void *_reserved;
}
/*! @method initWithNode:bus:
@abstract Create a connection point object.
@param node the source or destination node
@param bus the output or input bus on the node
@discussion
If the node is nil, this method fails (returns nil).
*/
- (instancetype)initWithNode:(AVAudioNode *)node bus:(AVAudioNodeBus)bus NS_DESIGNATED_INITIALIZER;
- (instancetype)init NS_UNAVAILABLE;
/*! @property node
@abstract Returns the node in the connection point.
*/
@property (nonatomic, readonly, weak) AVAudioNode *node;
/*! @property bus
@abstract Returns the bus on the node in the connection point.
*/
@property (nonatomic, readonly) AVAudioNodeBus bus;
@end
NS_ASSUME_NONNULL_END
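A one-line sketch of the immutable (node, bus) pair this class models:

#import <AVFAudio/AVAudioConnectionPoint.h>

static AVAudioConnectionPoint *outputPointFor(AVAudioNode *node) {
    // Per the docs above, fails (returns nil) if node is nil; bus 0 is the usual output.
    return [[AVAudioConnectionPoint alloc] initWithNode:node bus:0];
}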

View file

@@ -1,321 +0,0 @@
/*
File: AVAudioConverter.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#import <AVFAudio/AVAudioFormat.h>
#import <AVFAudio/AVAudioBuffer.h>
NS_ASSUME_NONNULL_BEGIN
/*! @enum AVAudioConverterPrimeMethod
@abstract values for the primeMethod property. See further discussion under AVAudioConverterPrimeInfo.
AVAudioConverterPrimeMethod_Pre
Primes with leading + trailing input frames.
AVAudioConverterPrimeMethod_Normal
Only primes with trailing (zero latency). Leading frames are assumed to be silence.
AVAudioConverterPrimeMethod_None
Acts in "latency" mode. Both leading and trailing frames assumed to be silence.
*/
typedef NS_ENUM(NSInteger, AVAudioConverterPrimeMethod) {
AVAudioConverterPrimeMethod_Pre = 0,
AVAudioConverterPrimeMethod_Normal = 1,
AVAudioConverterPrimeMethod_None = 2
};
/*!
@struct AVAudioConverterPrimeInfo
@abstract This struct is the value of the primeInfo property and specifies priming information.
@field leadingFrames
Specifies the number of leading (previous) input frames, relative to the normal/desired
start input frame, required by the converter to perform a high quality conversion. If
using AVAudioConverterPrimeMethod_Pre, the client should "pre-seek" the input stream provided
through the input proc by leadingFrames. If no frames are available previous to the
desired input start frame (because, for example, the desired start frame is at the very
beginning of available audio), then provide "leadingFrames" worth of initial zero frames
in the input proc. Do not "pre-seek" in the default case of
AVAudioConverterPrimeMethod_Normal or when using AVAudioConverterPrimeMethod_None.
@field trailingFrames
Specifies the number of trailing input frames (past the normal/expected end input frame)
required by the converter to perform a high quality conversion. The client should be
prepared to provide this number of additional input frames except when using
AVAudioConverterPrimeMethod_None. If no more frames of input are available in the input stream
(because, for example, the desired end frame is at the end of an audio file), then zero
(silent) trailing frames will be synthesized for the client.
@discussion
When using convertToBuffer:error:withInputFromBlock: (either a single call or a series of calls), some
conversions, particularly involving sample-rate conversion, ideally require a certain
number of input frames previous to the normal start input frame and beyond the end of
the last expected input frame in order to yield high-quality results.
These are expressed in the leadingFrames and trailingFrames members of the structure.
The very first call to convertToBuffer:error:withInputFromBlock:, or first call after
reset, will request additional input frames beyond those normally
expected in the input proc callback to fulfill this first AudioConverterFillComplexBuffer()
request. The number of additional frames requested, depending on the prime method, will
be approximately:
<pre>
AVAudioConverterPrimeMethod_Pre      leadingFrames + trailingFrames
AVAudioConverterPrimeMethod_Normal   trailingFrames
AVAudioConverterPrimeMethod_None     0
</pre>
Thus, in effect, the first input proc callback(s) may provide not only the leading
frames, but also may "read ahead" by an additional number of trailing frames depending
on the prime method.
AVAudioConverterPrimeMethod_None is useful in a real-time application processing live input,
in which case trailingFrames (relative to input sample rate) of through latency will be
seen at the beginning of the output of the AudioConverter. In other real-time
applications such as DAW systems, it may be possible to provide these initial extra
audio frames since they are stored on disk or in memory somewhere and
AVAudioConverterPrimeMethod_Pre may be preferable. The default method is
AVAudioConverterPrimeMethod_Normal, which requires no pre-seeking of the input stream and
generates no latency at the output.
*/
typedef struct AVAudioConverterPrimeInfo {
AVAudioFrameCount leadingFrames;
AVAudioFrameCount trailingFrames;
} AVAudioConverterPrimeInfo;
/*! @enum AVAudioConverterInputStatus
@abstract You must return one of these codes from your AVAudioConverterInputBlock.
AVAudioConverterInputStatus_HaveData
This is the normal case where you supply data to the converter.
AVAudioConverterInputStatus_NoDataNow
If you are out of data for now, set *ioNumberOfPackets = 0 and return AVAudioConverterInputStatus_NoDataNow and the
conversion routine will return as much output as could be converted with the input already supplied.
AVAudioConverterInputStatus_EndOfStream
If you are at the end of stream, set *ioNumberOfPackets = 0 and return AVAudioConverterInputStatus_EndOfStream.
*/
typedef NS_ENUM(NSInteger, AVAudioConverterInputStatus) {
AVAudioConverterInputStatus_HaveData = 0,
AVAudioConverterInputStatus_NoDataNow = 1,
AVAudioConverterInputStatus_EndOfStream = 2
} NS_ENUM_AVAILABLE(10_11, 9_0);
/*! @enum AVAudioConverterOutputStatus
@abstract These values are returned from convertToBuffer:error:withInputFromBlock:
AVAudioConverterOutputStatus_HaveData
All of the requested data was returned.
AVAudioConverterOutputStatus_InputRanDry
Not enough input was available to satisfy the request at the current time. The output buffer contains as much as could be converted.
AVAudioConverterOutputStatus_EndOfStream
The end of stream has been reached. No data was returned.
AVAudioConverterOutputStatus_Error
An error occurred.
*/
typedef NS_ENUM(NSInteger, AVAudioConverterOutputStatus) {
AVAudioConverterOutputStatus_HaveData = 0,
AVAudioConverterOutputStatus_InputRanDry = 1,
AVAudioConverterOutputStatus_EndOfStream = 2,
AVAudioConverterOutputStatus_Error = 3
} NS_ENUM_AVAILABLE(10_11, 9_0);
/*! @typedef AVAudioConverterInputBlock
@abstract A block which will be called by convertToBuffer:error:withInputFromBlock: to get input data as needed.
@param inNumberOfPackets
This will be the number of packets required to complete the request.
You may supply more or less than this amount. If less, then the input block will get called again.
@param outStatus
The block must set the appropriate AVAudioConverterInputStatus enum value.
If you have supplied data, set outStatus to AVAudioConverterInputStatus_HaveData and return an AVAudioBuffer.
If you are out of data for now, set outStatus to AVAudioConverterInputStatus_NoDataNow and return nil, and the
conversion routine will return as much output as could be converted with the input already supplied.
If you are at the end of stream, set outStatus to AVAudioConverterInputStatus_EndOfStream, and return nil.
@return
An AVAudioBuffer containing data to be converted, or nil if at end of stream or no data is available.
The data in the returned buffer must not be cleared or re-filled until the input block is called again or the conversion has finished.
@discussion
convertToBuffer:error:withInputFromBlock: will return as much output as could be converted with the input already supplied.
*/
typedef AVAudioBuffer * __nullable (^AVAudioConverterInputBlock)(AVAudioPacketCount inNumberOfPackets, AVAudioConverterInputStatus* outStatus);
/*!
@class AVAudioConverter
@abstract
AVAudioConverter converts streams of audio between various formats.
@discussion
*/
NS_CLASS_AVAILABLE(10_11, 9_0)
@interface AVAudioConverter : NSObject {
@private
void *_impl;
}
/*! @method initFromFormat:toFormat:
@abstract Initialize from input and output formats.
@param fromFormat
The input format.
@param toFormat
The output format.
@discussion
Returns nil if the format conversion is not possible.
*/
- (nullable instancetype)initFromFormat:(AVAudioFormat *)fromFormat toFormat:(AVAudioFormat *)toFormat;
/*! @method reset
@abstract Resets the converter so that a new stream may be converted.
*/
- (void)reset;
/*! @property inputFormat
@abstract The format of the input audio stream. (NB. AVAudioFormat includes the channel layout)
*/
@property (nonatomic, readonly) AVAudioFormat *inputFormat;
/*! @property outputFormat
@abstract The format of the output audio stream. (NB. AVAudioFormat includes the channel layout)
*/
@property (nonatomic, readonly) AVAudioFormat *outputFormat;
/*! @property channelMap
@abstract An array of integers indicating from which input to derive each output.
@discussion
The array has size equal to the number of output channels. Each element's value is
the input channel number, starting with zero, that is to be copied to that output. A negative value means
that the output channel will have no source and will be silent.
Setting a channel map overrides channel mapping due to any channel layouts in the input and output formats that may have been supplied.
*/
@property (nonatomic, retain) NSArray<NSNumber *> *channelMap;
/*! @property magicCookie
@abstract Decoders require some data in the form of a magicCookie in order to decode properly. Encoders will produce a magicCookie.
*/
@property (nonatomic, retain, nullable) NSData *magicCookie;
/*! @property downmix
@abstract If YES and channel remapping is necessary, then channels will be mixed as appropriate instead of remapped. Default value is NO.
*/
@property (nonatomic) BOOL downmix;
/*! @property dither
@abstract Setting YES will turn on dither, if dither makes sense given the current formats and settings. Default value is NO.
*/
@property (nonatomic) BOOL dither;
/*! @property sampleRateConverterQuality
@abstract An AVAudioQuality value as defined in AVAudioSettings.h.
*/
@property (nonatomic) NSInteger sampleRateConverterQuality;
/*! @property sampleRateConverterAlgorithm
@abstract An AVSampleRateConverterAlgorithmKey value as defined in AVAudioSettings.h.
*/
@property (nonatomic, retain, nullable) NSString *sampleRateConverterAlgorithm;
/*! @property primeMethod
@abstract Indicates the priming method to be used by the sample rate converter or decoder.
*/
@property (nonatomic) AVAudioConverterPrimeMethod primeMethod;
/*! @property primeInfo
@abstract Indicates the number of priming frames.
*/
@property (nonatomic) AVAudioConverterPrimeInfo primeInfo;
/*! @method convertToBuffer:fromBuffer:error:
@abstract Perform a simple conversion. That is, a conversion which does not involve codecs or sample rate conversion.
@param inputBuffer
The input buffer.
@param outputBuffer
The output buffer.
@param outError
An error if the conversion fails.
@return
YES is returned on success, NO when an error has occurred.
@discussion
The output buffer's frameCapacity should be at least as large as the inputBuffer's frameLength.
If the conversion involves a codec or sample rate conversion, you instead must use
convertToBuffer:error:withInputFromBlock:.
*/
- (BOOL)convertToBuffer:(AVAudioPCMBuffer *)outputBuffer fromBuffer:(const AVAudioPCMBuffer *)inputBuffer error:(NSError **)outError;
/*! @method convertToBuffer:error:withInputFromBlock:
@abstract Perform any supported conversion.
@param inputBlock
A block which will be called to get input data as needed. See description for AVAudioConverterInputBlock.
@param outputBuffer
The output buffer.
@param outError
An error if the conversion fails.
@return
An AVAudioConverterOutputStatus is returned.
@discussion
It attempts to fill the buffer to its capacity. On return, the buffer's length indicates the number of
sample frames successfully converted.
*/
- (AVAudioConverterOutputStatus)convertToBuffer:(AVAudioBuffer *)outputBuffer error:(NSError **)outError withInputFromBlock:(AVAudioConverterInputBlock)inputBlock;
@end
@interface AVAudioConverter (Encoding)
/*! @property bitRate
@abstract bitRate in bits per second. Only applies when encoding.
*/
@property (nonatomic) NSInteger bitRate;
/*! @property bitRateStrategy
@abstract When encoding, an AVEncoderBitRateStrategyKey value constant as defined in AVAudioSettings.h. Returns nil if not encoding.
*/
@property (nonatomic, retain, nullable) NSString *bitRateStrategy;
/*! @property maximumOutputPacketSize
@abstract The maximum size of an output packet, in bytes.
@discussion When encoding it is useful to know how large a packet can be in order to allocate a buffer to receive the output.
*/
@property (nonatomic, readonly) NSInteger maximumOutputPacketSize;
/*! @property availableEncodeBitRates
@abstract When encoding, an NSArray of NSNumber of all bit rates provided by the codec. Returns nil if not encoding.
*/
@property (nonatomic, readonly, nullable) NSArray<NSNumber *> *availableEncodeBitRates;
/*! @property applicableEncodeBitRates
@abstract When encoding, an NSArray of NSNumber of bit rates that can be applied based on the current formats and settings. Returns nil if not encoding.
*/
@property (nonatomic, readonly, nullable) NSArray<NSNumber *> *applicableEncodeBitRates;
/*! @property availableEncodeSampleRates
@abstract When encoding, an NSArray of NSNumber of all output sample rates provided by the codec. Returns nil if not encoding.
*/
@property (nonatomic, readonly, nullable) NSArray<NSNumber *> *availableEncodeSampleRates;
/*! @property applicableEncodeSampleRates
@abstract When encoding, an NSArray of NSNumber of output sample rates that can be applied based on the current formats and settings. Returns nil if not encoding.
*/
@property (nonatomic, readonly, nullable) NSArray<NSNumber *> *applicableEncodeSampleRates;
/*! @property availableEncodeChannelLayoutTags
@abstract When encoding, an NSArray of NSNumber of all output channel layout tags provided by the codec. Returns nil if not encoding.
*/
@property (nonatomic, readonly, nullable) NSArray<NSNumber *> *availableEncodeChannelLayoutTags;
@end
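/* Example (an illustrative sketch, not part of the original header): configuring the
converter as an AAC encoder and sizing the output with maximumOutputPacketSize. The
44.1 kHz stereo formats and the 128 kbps rate are assumptions; AVFoundation is
assumed to be imported as in the previous sketch.
<pre>
AVAudioFormat *pcmFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0 channels:2];

AudioStreamBasicDescription aacDesc = {0};
aacDesc.mSampleRate       = 44100.0;
aacDesc.mFormatID         = kAudioFormatMPEG4AAC;
aacDesc.mChannelsPerFrame = 2;
aacDesc.mFramesPerPacket  = 1024;                 // AAC packs 1024 frames per packet
AVAudioFormat *aacFormat = [[AVAudioFormat alloc] initWithStreamDescription:&aacDesc];

AVAudioConverter *encoder = [[AVAudioConverter alloc] initFromFormat:pcmFormat toFormat:aacFormat];
NSLog(@"applicable bit rates: %@", encoder.applicableEncodeBitRates);
encoder.bitRate = 128000;                         // ideally picked from the array above

// maximumOutputPacketSize bounds each encoded packet, so it sizes the output buffer.
AVAudioCompressedBuffer *outBuffer =
    [[AVAudioCompressedBuffer alloc] initWithFormat:aacFormat
                                     packetCapacity:8
                                  maximumPacketSize:encoder.maximumOutputPacketSize];
</pre>
*/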
NS_ASSUME_NONNULL_END

View file

@ -1,671 +0,0 @@
/*
File: AVAudioEngine.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#import <AVFAudio/AVAudioBuffer.h>
#import <AVFAudio/AVAudioConnectionPoint.h>
#import <AVFAudio/AVAudioIONode.h>
#import <AVFAudio/AVAudioTime.h>
#if __has_include(<AudioToolbox/MusicPlayer.h>)
#define AVAUDIOENGINE_HAVE_MUSICPLAYER 1
#import <AudioToolbox/MusicPlayer.h>
#endif
NS_ASSUME_NONNULL_BEGIN
@class AVAudioFormat, AVAudioNode, AVAudioInputNode, AVAudioOutputNode, AVAudioMixerNode;
/*! @enum AVAudioEngineManualRenderingError
@abstract
Error codes that could be returned from AVAudioEngine manual rendering mode methods,
e.g. `enableManualRenderingMode:format:maximumFrameCount:error:` and
`renderOffline:toBuffer:error:`.
Note that this is not a comprehensive list, and the underlying audio units could
return other error codes (e.g. see kAudioUnitErr_* in AudioToolbox/AUComponent.h) from these
methods as applicable.
AVAudioEngineManualRenderingErrorInvalidMode
The operation cannot be performed because the engine is either not in manual
rendering mode or the right variant of it.
AVAudioEngineManualRenderingErrorInitialized
The operation cannot be performed because the engine is initialized (i.e. not stopped).
AVAudioEngineManualRenderingErrorNotRunning
The operation cannot be performed because the engine is not running (i.e. not started).
*/
typedef NS_ENUM(OSStatus, AVAudioEngineManualRenderingError) {
AVAudioEngineManualRenderingErrorInvalidMode = -80800,
AVAudioEngineManualRenderingErrorInitialized = -80801,
AVAudioEngineManualRenderingErrorNotRunning = -80802
} API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @enum AVAudioEngineManualRenderingStatus
@abstract
Status codes returned from the render call to the engine operating in manual rendering mode.
AVAudioEngineManualRenderingStatusError
An error occurred when rendering and no data was returned. See the returned error code
for the description of the error.
AVAudioEngineManualRenderingStatusSuccess
All of the requested data was returned successfully.
AVAudioEngineManualRenderingStatusInsufficientDataFromInputNode
Applicable only to the input node, when it provides input data for rendering
(see `AVAudioInputNode(setManualRenderingInputPCMFormat:inputBlock:)`).
Indicates that not enough input data was returned by the input node to satisfy the
render request at the current time. The output buffer may contain data rendered by other
active sources in the engine's processing graph.
AVAudioEngineManualRenderingStatusCannotDoInCurrentContext
The operation could not be performed now, but the client could retry later if needed.
This is usually to guard a realtime render operation (e.g. rendering through
`manualRenderingBlock`) when a reconfiguration of the engine's internal state
is in progress.
*/
typedef NS_ENUM(NSInteger, AVAudioEngineManualRenderingStatus) {
AVAudioEngineManualRenderingStatusError = -1,
AVAudioEngineManualRenderingStatusSuccess = 0,
AVAudioEngineManualRenderingStatusInsufficientDataFromInputNode = 1,
AVAudioEngineManualRenderingStatusCannotDoInCurrentContext = 2
} API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @enum AVAudioEngineManualRenderingMode
@abstract
By default, the engine is connected to an audio device and automatically renders in realtime.
It can also be configured to operate in manual rendering mode, i.e. not connected to an
audio device and rendering in response to requests from the client.
AVAudioEngineManualRenderingModeOffline
The engine operates in an offline mode without any realtime constraints.
AVAudioEngineManualRenderingModeRealtime
The engine operates under realtime constraints, i.e. it will not make any blocking call
(e.g. calling libdispatch, blocking on a mutex, allocating memory etc.) while rendering.
Note that only the block based render mechanism can be used in this mode
(see `AVAudioEngine(manualRenderingBlock)`).
*/
typedef NS_ENUM(NSInteger, AVAudioEngineManualRenderingMode) {
AVAudioEngineManualRenderingModeOffline = 0,
AVAudioEngineManualRenderingModeRealtime = 1
} API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @typedef AVAudioEngineManualRenderingBlock
@abstract
Block to render the engine when operating in manual rendering mode
@param numberOfFrames
The number of PCM sample frames to be rendered
@param outBuffer
The PCM buffer to which the engine must render the audio.
The buffer pointers (outBuffer->mBuffers[x].mData) may be null on entry, in which case
the block will render into memory it owns and modify the mData pointers to point to that
memory. The block is responsible for preserving the validity of that memory until it is next
called to render, or `AVAudioEngine(stop)` is called.
@param outError
On exit, if an error occurs during rendering, a description of the error (see
`AVAudioEngineManualRenderingError` for the possible errors)
@return
One of the status codes from `AVAudioEngineManualRenderingStatus`. Irrespective of the
returned status code, on exit, the output buffer's mDataByteSize
(outBuffer->mBuffers[x].mDataByteSize) will indicate the number of PCM data bytes rendered by
the engine.
@discussion
Use this if you want to render the engine from a realtime context when it is operating in
the manual rendering mode. See `AVAudioEngine(manualRenderingBlock)` for details.
*/
typedef AVAudioEngineManualRenderingStatus (^AVAudioEngineManualRenderingBlock)(AVAudioFrameCount numberOfFrames, AudioBufferList *outBuffer, OSStatus * __nullable outError) API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*!
@class AVAudioEngine
@discussion
An AVAudioEngine contains a group of connected AVAudioNodes ("nodes"), each of which performs
an audio signal generation, processing, or input/output task.
Nodes are created separately and attached to the engine.
The engine supports dynamic connection, disconnection and removal of nodes while running,
with only minor limitations:
- all dynamic reconnections must occur upstream of a mixer
- while removals of effects will normally result in the automatic connection of the adjacent
nodes, removal of a node which has differing input vs. output channel counts, or which
is a mixer, is likely to result in a broken graph.
By default, the engine is connected to an audio device and automatically renders in realtime.
It can also be configured to operate in manual rendering mode, i.e. not connected to an
audio device and rendering in response to requests from the client, normally at or
faster than realtime rate.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioEngine : NSObject {
@private
void *_impl;
}
/*! @method init
@abstract
Initialize a new engine.
@discussion
On creation, the engine is by default connected to an audio device and automatically renders
in realtime. It can be configured to operate in manual rendering mode through
`enableManualRenderingMode:format:maximumFrameCount:error:`.
*/
- (instancetype)init;
/*! @method attachNode:
@abstract
Take ownership of a new node.
@param node
The node to be attached to the engine.
@discussion
To support the instantiation of arbitrary AVAudioNode subclasses, instances are created
externally to the engine, but are not usable until they are attached to the engine via
this method. Thus the idiom, without ARC, is:
<pre>
// when building engine:
AVAudioNode *_player; // member of controller class (for example)
...
_player = [[AVAudioPlayerNode alloc] init];
[engine attachNode: _player];
...
// when destroying engine (without ARC)
[_player release];
</pre>
*/
- (void)attachNode:(AVAudioNode *)node;
/*! @method detachNode:
@abstract
Detach a node previously attached to the engine.
@discussion
If necessary, the engine will safely disconnect the node before detaching it.
*/
- (void)detachNode:(AVAudioNode *)node;
/*! @method connect:to:fromBus:toBus:format:
@abstract
Establish a connection between two nodes.
@param node1
The source node
@param node2
The destination node
@param bus1
The output bus on the source node
@param bus2
The input bus on the destination node
@param format
If non-nil, the format of the source node's output bus is set to this
format. In all cases, the format of the destination node's input bus is set to
match that of the source node's output bus.
@discussion
Nodes have input and output buses (AVAudioNodeBus). Use this method to establish
one-to-one connections between nodes. Connections made using this method are always
one-to-one, never one-to-many or many-to-one.
Note that any pre-existing connection(s) involving the source's output bus or the
destination's input bus will be broken.
*/
- (void)connect:(AVAudioNode *)node1 to:(AVAudioNode *)node2 fromBus:(AVAudioNodeBus)bus1 toBus:(AVAudioNodeBus)bus2 format:(AVAudioFormat * __nullable)format;
/*! @method connect:to:format:
@abstract
Establish a connection between two nodes
@discussion
This calls connect:to:fromBus:toBus:format: using bus 0 on the source node,
and bus 0 on the destination node, except in the case of a destination which is a mixer,
in which case the destination is the mixer's nextAvailableInputBus.
*/
- (void)connect:(AVAudioNode *)node1 to:(AVAudioNode *)node2 format:(AVAudioFormat * __nullable)format;
/*! @method connect:toConnectionPoints:fromBus:format:
@abstract
Establish connections between a source node and multiple destination nodes.
@param sourceNode
The source node
@param destNodes
An array of AVAudioConnectionPoint objects specifying destination
nodes and busses
@param sourceBus
The output bus on source node
@param format
If non-nil, the format of the source node's output bus is set to this
format. In all cases, the format of the destination nodes' input bus is set to
match that of the source node's output bus
@discussion
Use this method to establish connections from a source node to multiple destination nodes.
Connections made using this method are either one-to-one (when a single destination
connection is specified) or one-to-many (when multiple connections are specified), but
never many-to-one.
To incrementally add a new connection to a source node, use this method with an array
of AVAudioConnectionPoint objects comprising the pre-existing connections (obtained from
`outputConnectionPointsForNode:outputBus:`) and the new connection.
Note that any pre-existing connection involving the destination's input bus will be
broken. Any pre-existing connection on the source node which is not a part of the
specified destination connection array will also be broken.
Also note that when the output of a node is split into multiple paths, all the paths
must render at the same rate until they reach a common mixer.
In other words, starting from the split node until the common mixer node where all split
paths terminate, you cannot have:
- any AVAudioUnitTimeEffect
- any sample rate conversion
*/
- (void)connect:(AVAudioNode *)sourceNode toConnectionPoints:(NSArray<AVAudioConnectionPoint *> *)destNodes fromBus:(AVAudioNodeBus)sourceBus format:(AVAudioFormat * __nullable)format NS_AVAILABLE(10_11, 9_0);
/*! @method disconnectNodeInput:bus:
@abstract
Remove a connection between two nodes.
@param node
The node whose input is to be disconnected
@param bus
The destination's input bus to disconnect
*/
- (void)disconnectNodeInput:(AVAudioNode *)node bus:(AVAudioNodeBus)bus;
/*! @method disconnectNodeInput:
@abstract
Remove a connection between two nodes.
@param node
The node whose inputs are to be disconnected
@discussion
Connections are broken on each of the node's input busses.
*/
- (void)disconnectNodeInput:(AVAudioNode *)node;
/*! @method disconnectNodeOutput:bus:
@abstract
Remove a connection between two nodes.
@param node
The node whose output is to be disconnected
@param bus
The source's output bus to disconnect
*/
- (void)disconnectNodeOutput:(AVAudioNode *)node bus:(AVAudioNodeBus)bus;
/*! @method disconnectNodeOutput:
@abstract
Remove a connection between two nodes.
@param node
The node whose outputs are to be disconnected
@discussion
Connections are broken on each of the node's output busses.
*/
- (void)disconnectNodeOutput:(AVAudioNode *)node;
/*! @method prepare
@abstract
Prepare the engine for starting.
@discussion
This method preallocates many of the resources the engine requires in order to start.
Calling it in advance allows the engine to start more responsively.
*/
- (void)prepare;
/*! @method startAndReturnError:
@abstract
Start the engine.
@return
YES for success
@discussion
Calls prepare if it has not already been called since stop.
When the engine is rendering to/from an audio device, starts the audio hardware via the
AVAudioInputNode and/or AVAudioOutputNode instances in the engine. Audio begins to flow
through the engine.
Reasons for potential failure to start in this mode include:
1. There is a problem in the structure of the graph, e.g. input can't be routed to output
or to a recording tap through converter-type nodes.
2. An AVAudioSession error.
3. The driver failed to start the hardware.
In manual rendering mode, prepares the engine to render when requested by the client.
*/
- (BOOL)startAndReturnError:(NSError **)outError;
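/* Example (an illustrative sketch, not part of the original header): a minimal graph,
attaching a player node, connecting it to the main mixer with an engine-derived
format, and starting the engine.
<pre>
AVAudioEngine *engine = [[AVAudioEngine alloc] init];
AVAudioPlayerNode *player = [[AVAudioPlayerNode alloc] init];

[engine attachNode:player];
// A nil format lets the engine pick the connection format; because the
// destination is a mixer, its nextAvailableInputBus is used.
[engine connect:player to:engine.mainMixerNode format:nil];

NSError *error = nil;
if (![engine startAndReturnError:&error]) {
    NSLog(@"engine failed to start: %@", error);
}
</pre>
*/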
/*! @method pause
@abstract
Pause the engine.
@discussion
When the engine is rendering to/from an audio device, stops the audio hardware and the flow
of audio through the engine. When operating in this mode, it is recommended that the engine
be paused or stopped (as applicable) when not in use, to minimize power consumption.
Pausing the engine does not deallocate the resources allocated by prepare. Resume the
engine by invoking start again.
*/
- (void)pause;
/*! @method reset
@abstract
Reset all of the nodes in the engine.
@discussion
This will reset all of the nodes in the engine. This is useful, for example, for silencing
reverb and delay tails.
In manual rendering mode, the render timeline is reset to a sample time of zero.
*/
- (void)reset;
/*! @method stop
@abstract
When the engine is rendering to/from an audio device, stops the audio hardware and the
engine. When operating in this mode, it is recommended that the engine be paused or stopped
(as applicable) when not in use, to minimize power consumption.
Stopping the engine releases the resources allocated by prepare.
*/
- (void)stop;
/*! @method inputConnectionPointForNode:inputBus:
@abstract
Get connection information on a node's input bus.
@param node
The node whose input connection is being queried.
@param bus
The node's input bus on which the connection is being queried.
@return
An AVAudioConnectionPoint object with connection information on the node's
specified input bus.
@discussion
Connections are always one-to-one or one-to-many, never many-to-one.
Returns nil if there is no connection on the node's specified input bus.
*/
- (AVAudioConnectionPoint * __nullable)inputConnectionPointForNode:(AVAudioNode *)node inputBus:(AVAudioNodeBus)bus NS_AVAILABLE(10_11, 9_0);
/*! @method outputConnectionPointsForNode:outputBus:
@abstract
Get connection information on a node's output bus.
@param node
The node whose output connections are being queried.
@param bus
The node's output bus on which connections are being queried.
@return
An array of AVAudioConnectionPoint objects with connection information on the node's
specified output bus.
@discussion
Connections are always one-to-one or one-to-many, never many-to-one.
Returns an empty array if there are no connections on the node's specified output bus.
*/
- (NSArray<AVAudioConnectionPoint *> *)outputConnectionPointsForNode:(AVAudioNode *)node outputBus:(AVAudioNodeBus)bus NS_AVAILABLE(10_11, 9_0);
#if AVAUDIOENGINE_HAVE_MUSICPLAYER
/*! @property musicSequence
@abstract
The MusicSequence previously attached to the engine (if any).
*/
@property (nonatomic, nullable) MusicSequence musicSequence;
#endif
/*! @property outputNode
@abstract
The engine's singleton output node.
@discussion
Audio output is performed via an output node. The engine creates a singleton on demand when
this property is first accessed. Connect another node to the input of the output node, or
obtain a mixer that is connected there by default, using the "mainMixerNode" property.
When the engine is rendering to/from an audio device, the AVAudioSession category and/or
availability of hardware determine whether an app can perform output. Check the output
format of the output node (i.e. hardware format) for non-zero sample rate and channel count to
see if output is enabled.
Trying to perform output through the output node when it is not enabled or available will
cause the engine to throw an error (when possible) or an exception.
In manual rendering mode, the output format of the output node will determine the
render format of the engine. It can be changed through
`enableManualRenderingMode:format:maximumFrameCount:error:`.
*/
@property (readonly, nonatomic) AVAudioOutputNode *outputNode;
/*! @property inputNode
@abstract
The engine's singleton input node.
@discussion
Audio input is performed via an input node. The engine creates a singleton on demand when
this property is first accessed. To receive input, connect another node from the output of
the input node, or create a recording tap on it.
When the engine is rendering to/from an audio device, the AVAudioSession category and/or
availability of hardware determine whether an app can perform input (e.g. input hardware is
not available on tvOS). Check the input node's input format (i.e. hardware format) for
non-zero sample rate and channel count to see if input is enabled.
Trying to perform input through the input node when it is not enabled or available will
cause the engine to throw an error (when possible) or an exception.
In manual rendering mode, the input node can be used to synchronously supply data to
the engine while it is rendering (see
`AVAudioInputNode(setManualRenderingInputPCMFormat:inputBlock:)`).
*/
@property (readonly, nonatomic) AVAudioInputNode *inputNode API_AVAILABLE(macos(10.10), ios(8.0), watchos(4.0), tvos(11.0));
/*! @property mainMixerNode
@abstract
The engine's optional singleton main mixer node.
@discussion
The engine will construct a singleton main mixer and connect it to the outputNode on demand,
when this property is first accessed. You can then connect additional nodes to the mixer.
By default, the mixer's output format (sample rate and channel count) will track the format
of the output node. You may however make the connection explicitly with a different format.
*/
@property (readonly, nonatomic) AVAudioMixerNode *mainMixerNode;
/*! @property running
@abstract
The engine's running state.
*/
@property (readonly, nonatomic, getter=isRunning) BOOL running;
/*! @property autoShutdownEnabled
@abstract
When auto shutdown is enabled, the engine can start and stop the audio hardware dynamically,
to conserve power. This is the enforced behavior on watchOS and can be optionally enabled on
other platforms.
@discussion
To conserve power, it is advised that the client pause/stop the engine when not in use.
But when auto shutdown is enabled, the engine will stop the audio hardware if it was running
idle for a certain duration, and restart it later when required.
Note that, because this operation is dynamic, it may affect the start times of the source
nodes (e.g. `AVAudioPlayerNode`), if the engine has to resume from its shutdown state.
On watchOS, auto shutdown is always enabled. On other platforms, it is disabled by
default, but the client can enable it if needed.
This property is applicable only when the engine is rendering to/from an audio device. If
the value is changed when the engine is in manual rendering mode, it will take effect
whenever the engine is switched to render to/from the audio device.
*/
@property (nonatomic, getter=isAutoShutdownEnabled) BOOL autoShutdownEnabled API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0)) API_UNAVAILABLE(watchos);
#pragma mark -
#pragma mark Manual Rendering Mode
/*! @method enableManualRenderingMode:format:maximumFrameCount:error:
@abstract
Set the engine to operate in manual rendering mode with the specified render format and
maximum frame count.
@param format
The format of the output PCM audio data from the engine
@param maximumFrameCount
The maximum number of PCM sample frames the engine will be asked to produce in any single
render call
@param outError
On exit, if the engine cannot switch to the manual rendering mode, a description of the
error (see `AVAudioEngineManualRenderingError` for the possible errors)
@return
YES for success
@discussion
Use this method to configure the engine to render in response to requests from the client.
The engine must be in a stopped state before calling this method.
The render format must be a PCM format and match the format of the buffer to which
the engine is asked to render (see `renderOffline:toBuffer:error:`).
The input data in manual rendering mode can be supplied through the source nodes, e.g.
`AVAudioPlayerNode`, `AVAudioInputNode` etc.
When switching to manual rendering mode, the engine:
1. Switches the input and output nodes to manual rendering mode. Their input and output
formats may change.
2. Removes any taps previously installed on the input and output nodes.
3. Maintains all the engine connections as is.
Reasons for potential failure when switching to manual rendering mode include:
- Engine is not in a stopped state.
*/
- (BOOL)enableManualRenderingMode:(AVAudioEngineManualRenderingMode)mode format:(AVAudioFormat *)pcmFormat maximumFrameCount:(AVAudioFrameCount)maximumFrameCount error:(NSError **)outError API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @method disableManualRenderingMode
@abstract
Set the engine to render to/from an audio device.
@discussion
When disabling the manual rendering mode, the engine:
1. Stops and resets itself (see `stop` and `reset`).
2. Switches the output/input nodes to render to/from an audio device. Their input and
output formats may change.
3. Removes any taps previously installed on the input and output nodes.
4. Maintains all the engine connections as is.
Calling this method when the engine is already rendering to/from an audio device has no
effect.
*/
- (void)disableManualRenderingMode API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @method renderOffline:toBuffer:error:
@abstract
Render call to the engine operating in the offline manual rendering mode
@param numberOfFrames
The number of PCM sample frames to be rendered
@param buffer
The PCM buffer to which the engine must render the audio
@param outError
On exit, if an error occurs during rendering, a description of the error (see
`AVAudioEngineManualRenderingError` for the possible errors)
@return
One of the status codes from `AVAudioEngineManualRenderingStatus`. Irrespective of the
returned status code, on exit, the output buffer's frameLength will indicate the number of
PCM samples rendered by the engine
@discussion
The engine must be in the offline manual rendering mode
(`AVAudioEngineManualRenderingModeOffline`) and started before calling this method.
The format of the buffer must match the render format set through
`enableManualRenderingMode:format:maximumFrameCount:error:`. The buffer capacity must be
greater than or equal to the number of samples asked to render.
On exit, the buffer's frameLength will indicate the number of PCM samples rendered by the
engine.
The engine's timeline in manual rendering mode starts at a sample time of zero, and is in
terms of the render format's sample rate. Resetting the engine (see `reset`) will reset the
timeline back to zero.
When rendering in `AVAudioEngineManualRenderingModeRealtime`, this ObjC render method
must not be used; an error is returned if it is. Use the block based render call
(`manualRenderingBlock`) in that mode instead.
*/
- (AVAudioEngineManualRenderingStatus)renderOffline:(AVAudioFrameCount)numberOfFrames toBuffer:(AVAudioPCMBuffer *)buffer error:(NSError **)outError API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0)) __attribute__((swift_error(nonnull_error)));
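/* Example (an illustrative sketch, not part of the original header): an offline render
loop. `engine` is the configured engine from the earlier sketch, and the ten-second
stop point is an arbitrary assumption for the example.
<pre>
AVAudioFormat *renderFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0 channels:2];
NSError *error = nil;

// The engine must be stopped before switching modes, and started afterwards.
[engine enableManualRenderingMode:AVAudioEngineManualRenderingModeOffline
                           format:renderFormat
                maximumFrameCount:4096
                            error:&error];
[engine startAndReturnError:&error];

AVAudioPCMBuffer *buffer =
    [[AVAudioPCMBuffer alloc] initWithPCMFormat:engine.manualRenderingFormat
                                  frameCapacity:engine.manualRenderingMaximumFrameCount];
AVAudioFramePosition totalFrames = 44100 * 10;    // e.g. render ten seconds
while (engine.manualRenderingSampleTime < totalFrames) {
    AVAudioFrameCount toRender =
        (AVAudioFrameCount)MIN((AVAudioFramePosition)buffer.frameCapacity,
                               totalFrames - engine.manualRenderingSampleTime);
    AVAudioEngineManualRenderingStatus status =
        [engine renderOffline:toRender toBuffer:buffer error:&error];
    if (status == AVAudioEngineManualRenderingStatusError) break;
    // buffer.frameLength frames are now valid; e.g. write them to an AVAudioFile.
}
</pre>
*/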
/*! @property manualRenderingBlock
@abstract
Block to render the engine operating in manual rendering mode
@discussion
This block based render call must be used to render the engine when operating in
`AVAudioEngineManualRenderingModeRealtime`. In this mode, the engine operates under
realtime constraints and will not make any blocking call (e.g. calling libdispatch, blocking
on a mutex, allocating memory etc.) while rendering.
Before invoking the rendering functionality, the client must fetch this block and cache the
result. The block can then be called from a realtime context, without any possibility of
blocking.
When rendering in `AVAudioEngineManualRenderingModeOffline`, either this block based render
call or `renderOffline:toBuffer:error:` ObjC method can be used.
All the rules outlined in `renderOffline:toBuffer:error:` are applicable here as well.
*/
@property (readonly, nonatomic) AVAudioEngineManualRenderingBlock manualRenderingBlock API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property isInManualRenderingMode
@abstract
Whether or not the engine is operating in manual rendering mode, i.e. not connected
to an audio device and rendering in response to the requests from the client
*/
@property (readonly, nonatomic) BOOL isInManualRenderingMode API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property manualRenderingMode
@abstract
The manual rendering mode configured on the engine
@discussion
This property is meaningful only when the engine is operating in manual rendering mode,
i.e. when `isInManualRenderingMode` returns true.
*/
@property (readonly, nonatomic) AVAudioEngineManualRenderingMode manualRenderingMode API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property manualRenderingFormat
@abstract
The render format of the engine in manual rendering mode.
@discussion
Querying this property when the engine is not in manual rendering mode will return an
invalid format, with zero sample rate and channel count.
*/
@property (readonly, nonatomic) AVAudioFormat *manualRenderingFormat API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property manualRenderingMaximumFrameCount
@abstract
The maximum number of PCM sample frames the engine can produce in any single render call in
the manual rendering mode.
@discussion
Querying this property when the engine is not in manual rendering mode will return zero.
*/
@property (readonly, nonatomic) AVAudioFrameCount manualRenderingMaximumFrameCount API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property manualRenderingSampleTime
@abstract
Indicates where the engine is on its render timeline in manual rendering mode.
@discussion
The timeline in manual rendering mode starts at a sample time of zero, and is in terms
of the render format's sample rate. Resetting the engine (see `reset`) will reset the
timeline back to zero.
*/
@property (readonly, nonatomic) AVAudioFramePosition manualRenderingSampleTime API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
@end // AVAudioEngine
/*! @constant AVAudioEngineConfigurationChangeNotification
@abstract
A notification generated on engine configuration changes when rendering to/from an audio
device.
@discussion
Register for this notification on your engine instances, as follows:
[[NSNotificationCenter defaultCenter] addObserver: myObject
selector: @selector(handleInterruption:)
name: AVAudioEngineConfigurationChangeNotification
object: engine];
When the engine's I/O unit observes a change to the audio input or output hardware's
channel count or sample rate, the engine stops itself (see `AVAudioEngine(stop)`), and
issues this notification.
The nodes remain attached and connected with previously set formats. However, the app
must reestablish connections if the connection formats need to change (e.g. in an
input node chain, connections must follow the hardware sample rate, while in an output-only
chain, the output node supports rate conversion).
Note that the engine must not be deallocated from within the client's notification handler
because the callback happens on an internal dispatch queue and can deadlock while trying to
synchronously tear down the engine.
*/
AVF_EXPORT
NSString *const AVAudioEngineConfigurationChangeNotification NS_AVAILABLE(10_10, 8_0);
NS_ASSUME_NONNULL_END

View file

@ -1,269 +0,0 @@
/*
File: AVAudioEnvironmentNode.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioNode.h>
#import <AVFAudio/AVAudioUnitReverb.h>
#import <AVFAudio/AVAudioUnitEQ.h>
#import <AVFAudio/AVAudioMixing.h>
NS_ASSUME_NONNULL_BEGIN
/*! @enum AVAudioEnvironmentDistanceAttenuationModel
@abstract Types of distance attenuation models
@discussion
Distance attenuation is the natural attenuation of sound when traveling from the source to
the listener. The different attenuation models listed below describe the drop-off in gain as
the source moves away from the listener.
AVAudioEnvironmentDistanceAttenuationModelExponential
distanceGain = (distance / referenceDistance) ^ (-rolloffFactor)
AVAudioEnvironmentDistanceAttenuationModelInverse
distanceGain = referenceDistance / (referenceDistance + rolloffFactor *
(distance - referenceDistance))
AVAudioEnvironmentDistanceAttenuationModelLinear
distanceGain = (1 - rolloffFactor * (distance - referenceDistance) /
(maximumDistance - referenceDistance))
With all the distance models, if the formula cannot be evaluated then the source will not
be attenuated. For example, if a linear model is being used with referenceDistance equal
to maximumDistance, then the gain equation will have a divide-by-zero error in it. In this
case, there is no attenuation for that source.
All the values for distance are specified in meters.
*/
typedef NS_ENUM(NSInteger, AVAudioEnvironmentDistanceAttenuationModel) {
AVAudioEnvironmentDistanceAttenuationModelExponential = 1,
AVAudioEnvironmentDistanceAttenuationModelInverse = 2,
AVAudioEnvironmentDistanceAttenuationModelLinear = 3
} NS_ENUM_AVAILABLE(10_10, 8_0);
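/* A worked transcription (not part of the original header) of the gain formulas
above, useful for checking expected attenuation values; <math.h> is assumed for powf.
<pre>
#include <math.h>

// All distances are in meters, as noted above.
static float DistanceGain(AVAudioEnvironmentDistanceAttenuationModel model,
                          float distance, float referenceDistance,
                          float maximumDistance, float rolloffFactor) {
    switch (model) {
        case AVAudioEnvironmentDistanceAttenuationModelExponential:
            return powf(distance / referenceDistance, -rolloffFactor);
        case AVAudioEnvironmentDistanceAttenuationModelInverse:
            return referenceDistance /
                   (referenceDistance + rolloffFactor * (distance - referenceDistance));
        case AVAudioEnvironmentDistanceAttenuationModelLinear:
            return 1.0f - rolloffFactor * (distance - referenceDistance) /
                          (maximumDistance - referenceDistance);
    }
    return 1.0f;   // unattenuated if the model is unknown
}
// e.g. the inverse model with referenceDistance 1 m and rolloffFactor 1:
// a source 5 m away is attenuated to 1 / (1 + 1 * (5 - 1)) = 0.2.
</pre>
*/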
/*! @class AVAudioEnvironmentDistanceAttenuationParameters
@abstract Parameters specifying the amount of distance attenuation
@discussion
A standalone instance of AVAudioEnvironmentDistanceAttenuationParameters cannot be created.
Only an instance vended out by a source object (e.g. AVAudioEnvironmentNode) can be used.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioEnvironmentDistanceAttenuationParameters : NSObject {
@private
void *_impl;
}
- (instancetype)init NS_UNAVAILABLE;
/*! @property distanceAttenuationModel
@abstract Type of distance attenuation model
@discussion
Default: AVAudioEnvironmentDistanceAttenuationModelInverse
*/
@property (nonatomic) AVAudioEnvironmentDistanceAttenuationModel distanceAttenuationModel;
/*! @property referenceDistance
@abstract The minimum distance at which attenuation is applied
@discussion
Default: 1.0 meter
Models: AVAudioEnvironmentDistanceAttenuationModelInverse,
AVAudioEnvironmentDistanceAttenuationModelLinear
*/
@property (nonatomic) float referenceDistance;
/*! @property maximumDistance
@abstract The distance beyond which no further attenuation is applied
@discussion
Default: 100000.0 meters
Models: AVAudioEnvironmentDistanceAttenuationModelLinear
*/
@property (nonatomic) float maximumDistance;
/*! @property rolloffFactor
@abstract Determines the attenuation curve
@discussion
A higher value results in a steeper attenuation curve.
The rolloff factor should be a value greater than 0.
Default: 1.0
Models: AVAudioEnvironmentDistanceAttenuationModelExponential
AVAudioEnvironmentDistanceAttenuationModelInverse
AVAudioEnvironmentDistanceAttenuationModelLinear
*/
@property (nonatomic) float rolloffFactor;
@end
/*! @class AVAudioEnvironmentReverbParameters
@abstract Parameters used to control the reverb in AVAudioEnvironmentNode
@discussion
Reverberation can be used to simulate the acoustic characteristics of an environment.
AVAudioEnvironmentNode has a built in reverb that describes the space that the listener
is in.
The reverb also has a single filter that sits at the end of the chain. This filter is useful
to shape the overall sound of the reverb. For instance, one of the reverb presets can be
selected to simulate the general space and then the filter can be used to brighten or darken
the overall sound.
A standalone instance of AVAudioEnvironmentReverbParameters cannot be created.
Only an instance vended out by a source object (e.g. AVAudioEnvironmentNode) can be used.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioEnvironmentReverbParameters : NSObject {
@private
void *_impl;
}
- (instancetype)init NS_UNAVAILABLE;
/*! @property enable
@abstract Turns on/off the reverb
@discussion
Default: NO
*/
@property (nonatomic) BOOL enable;
/*! @property level
@abstract Controls the master level of the reverb
@discussion
Range: -40 to 40 dB
Default: 0.0
*/
@property (nonatomic) float level;
/*! @property filterParameters
@abstract filter that applies to the output of the reverb
*/
@property (nonatomic, readonly) AVAudioUnitEQFilterParameters *filterParameters;
/*! @method loadFactoryReverbPreset:
@abstract Load one of the reverb's factory presets
@param preset
Reverb preset to be set.
@discussion
Loading a factory reverb preset changes the sound of the reverb. This works independently
of the filter which follows the reverb in the signal chain.
*/
- (void)loadFactoryReverbPreset:(AVAudioUnitReverbPreset)preset;
@end
/*!
@class AVAudioEnvironmentNode
@abstract Mixer node that simulates a 3D environment
@discussion
AVAudioEnvironmentNode is a mixer node that simulates a 3D audio environment. Any node that
conforms to the AVAudioMixing protocol (e.g. AVAudioPlayerNode) can act as a source in this
environment.
The environment has an implicit "listener". By controlling the listener's position and
orientation, the application controls the way the user experiences the virtual world.
In addition, this node also defines properties for distance attenuation and reverberation
that help characterize the environment.
It is important to note that only inputs with a mono channel connection format to the
environment node are spatialized. If the input is stereo, the audio is passed through
without being spatialized. Currently inputs with connection formats of more than 2 channels
are not supported.
In order to set the environment node's output to a multichannel format, use an AVAudioFormat
having one of the following AudioChannelLayoutTags:
kAudioChannelLayoutTag_AudioUnit_4
kAudioChannelLayoutTag_AudioUnit_5_0
kAudioChannelLayoutTag_AudioUnit_6_0
kAudioChannelLayoutTag_AudioUnit_7_0
kAudioChannelLayoutTag_AudioUnit_7_0_Front
kAudioChannelLayoutTag_AudioUnit_8
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioEnvironmentNode : AVAudioNode <AVAudioMixing>
- (instancetype)init NS_DESIGNATED_INITIALIZER;
/*! @property outputVolume
@abstract The mixer's output volume.
@discussion
This accesses the mixer's output volume (0.0-1.0, inclusive).
*/
@property (nonatomic) float outputVolume;
/*! @property nextAvailableInputBus
@abstract Find an unused input bus
@discussion
This will find and return the first input bus to which no other node is connected.
*/
@property (nonatomic, readonly) AVAudioNodeBus nextAvailableInputBus;
/*! @property listenerPosition
@abstract Sets the listener's position in the 3D environment
@discussion
The coordinates are specified in meters.
Default:
The default position of the listener is at the origin.
x: 0.0
y: 0.0
z: 0.0
*/
@property (nonatomic) AVAudio3DPoint listenerPosition;
/*! @property listenerVectorOrientation
@abstract The listener's orientation in the environment
@discussion
Changing listenerVectorOrientation will result in a corresponding change in listenerAngularOrientation.
Default:
The default orientation is with the listener looking directly along the negative Z axis.
forward: (0, 0, -1)
up: (0, 1, 0)
*/
@property (nonatomic) AVAudio3DVectorOrientation listenerVectorOrientation;
/*! @property listenerAngularOrientation
@abstract The listener's orientation in the environment
@discussion
Changing listenerAngularOrientation will result in a corresponding change in listenerVectorOrientation.
All angles are specified in degrees.
Default:
The default orientation is with the listener looking directly along the negative Z axis.
yaw: 0.0
pitch: 0.0
roll: 0.0
*/
@property (nonatomic) AVAudio3DAngularOrientation listenerAngularOrientation;
/*! @property distanceAttenuationParameters
@abstract The distance attenuation parameters for the environment
*/
@property (nonatomic, readonly) AVAudioEnvironmentDistanceAttenuationParameters *distanceAttenuationParameters;
/*! @property reverbParameters
@abstract The reverb parameters for the environment
*/
@property (nonatomic, readonly) AVAudioEnvironmentReverbParameters *reverbParameters;
/*! @property applicableRenderingAlgorithms
@abstract Returns an array of AVAudio3DMixingRenderingAlgorithm values based on the current output format
@discussion
AVAudioEnvironmentNode supports several rendering algorithms per input bus which are defined
in <AVFAudio/AVAudioMixing.h>.
Depending on the current output format of the environment node, this method returns
an immutable array of the applicable rendering algorithms. This is important when the
environment node has been configured to a multichannel output format because only a subset
of the available rendering algorithms are designed to render to all of the channels.
This information should be retrieved after a successful connection to the destination node
via the engine's connect method.
*/
@property (nonatomic, readonly) NSArray<NSNumber *> *applicableRenderingAlgorithms;
@end
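/* Example (an illustrative sketch, not part of the original header): spatializing a
player node. `engine` and `player` are the objects from the AVAudioEngine sketch
earlier; the positions and rolloff factor are arbitrary values for the example. Note
the mono connection format, since only mono inputs are spatialized.
<pre>
AVAudioEnvironmentNode *environment = [[AVAudioEnvironmentNode alloc] init];
[engine attachNode:environment];
[engine connect:environment to:engine.mainMixerNode format:nil];

AVAudioFormat *mono = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0 channels:1];
[engine connect:player to:environment format:mono];

environment.listenerPosition = AVAudioMake3DPoint(0.0, 0.0, 0.0);
player.position = AVAudioMake3DPoint(2.0, 0.0, -5.0);   // from the AVAudioMixing protocol
environment.distanceAttenuationParameters.distanceAttenuationModel =
    AVAudioEnvironmentDistanceAttenuationModelInverse;
environment.distanceAttenuationParameters.rolloffFactor = 2.0;
</pre>
*/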
NS_ASSUME_NONNULL_END

View file

@ -1,167 +0,0 @@
/*
File: AVAudioFile.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#import <AVFAudio/AVAudioFormat.h>
NS_ASSUME_NONNULL_BEGIN
@class NSURL;
@class AVAudioPCMBuffer;
/*!
@class AVAudioFile
@abstract
AVAudioFile represents an audio file opened for reading or writing.
@discussion
Regardless of the file's actual format, reading and writing the file is done via
`AVAudioPCMBuffer` objects, containing samples in an `AVAudioCommonFormat`,
referred to as the file's "processing format." Conversions are performed to and from
the file's actual format.
Reads and writes are always sequential, but random access is possible by setting the
framePosition property.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioFile : NSObject {
@private
void *_impl;
}
/*! @method initForReading:error:
@abstract Open a file for reading.
@param fileURL
the file to open
@param outError
on exit, if an error occurs, a description of the error
@discussion
This opens the file for reading using the standard format (deinterleaved floating point).
*/
- (nullable instancetype)initForReading:(NSURL *)fileURL error:(NSError **)outError;
/*! @method initForReading:commonFormat:interleaved:error:
@abstract Open a file for reading, using a specified processing format.
@param fileURL
the file to open
@param format
the processing format to use when reading from the file
@param interleaved
whether to use an interleaved processing format
@param outError
on exit, if an error occurs, a description of the error
*/
- (nullable instancetype)initForReading:(NSURL *)fileURL commonFormat:(AVAudioCommonFormat)format interleaved:(BOOL)interleaved error:(NSError **)outError;
/*! @method initForWriting:settings:error:
@abstract Open a file for writing.
@param fileURL
the path at which to create the file
@param settings
the format of the file to create (See `AVAudioRecorder`.)
@param outError
on exit, if an error occurs, a description of the error
@discussion
The file type to create can be set through the corresponding settings key. If not set, it will be
inferred from the file extension. Will overwrite a file at the specified URL if a file exists.
This opens the file for writing using the standard format (deinterleaved floating point).
*/
- (nullable instancetype)initForWriting:(NSURL *)fileURL settings:(NSDictionary<NSString *, id> *)settings error:(NSError **)outError;
/*! @method initForWriting:settings:commonFormat:interleaved:error:
@abstract Open a file for writing.
@param fileURL
the path at which to create the file
@param settings
the format of the file to create (See `AVAudioRecorder`.)
@param format
the processing format to use when writing to the file
@param interleaved
whether to use an interleaved processing format
@param outError
on exit, if an error occurs, a description of the error
@discussion
The file type to create can be set through the corresponding settings key. If not set, it will be
inferred from the file extension. Will overwrite a file at the specified URL if a file exists.
*/
- (nullable instancetype)initForWriting:(NSURL *)fileURL settings:(NSDictionary<NSString *, id> *)settings commonFormat:(AVAudioCommonFormat)format interleaved:(BOOL)interleaved error:(NSError **)outError;
/*! @method readIntoBuffer:error:
@abstract Read an entire buffer.
@param buffer
The buffer into which to read from the file. Its format must match the file's
processing format.
@param outError
on exit, if an error occurs, a description of the error
@return
YES for success.
@discussion
Reading sequentially from framePosition, attempts to fill the buffer to its capacity. On
return, the buffer's length indicates the number of sample frames successfully read.
*/
- (BOOL)readIntoBuffer:(AVAudioPCMBuffer *)buffer error:(NSError **)outError;
/*! @method readIntoBuffer:frameCount:error:
@abstract Read a portion of a buffer.
@param frames
The number of frames to read.
@param buffer
The buffer into which to read from the file. Its format must match the file's
processing format.
@param outError
on exit, if an error occurs, a description of the error
@return
YES for success.
@discussion
Like `readIntoBuffer:error:`, but can be used to read fewer frames than buffer.frameCapacity.
*/
- (BOOL)readIntoBuffer:(AVAudioPCMBuffer *)buffer frameCount:(AVAudioFrameCount)frames error:(NSError **)outError;
/*! @method writeFromBuffer:error:
@abstract Write a buffer.
@param buffer
The buffer from which to write to the file. Its format must match the file's
processing format.
@param outError
on exit, if an error occurs, a description of the error
@return
YES for success.
@discussion
Writes sequentially. The buffer's frameLength signifies how much of the buffer is to be written.
*/
- (BOOL)writeFromBuffer:(const AVAudioPCMBuffer *)buffer error:(NSError **)outError;
/*! @property url
@abstract The URL the file is reading or writing.
*/
@property (nonatomic, readonly) NSURL *url;
/*! @property fileFormat
@abstract The on-disk format of the file.
*/
@property (nonatomic, readonly) AVAudioFormat *fileFormat;
/*! @property processingFormat
@abstract The processing format of the file.
*/
@property (nonatomic, readonly) AVAudioFormat *processingFormat;
/*! @property length
@abstract The number of sample frames in the file.
@discussion
Note: this can be expensive to compute the first time.
*/
@property (nonatomic, readonly) AVAudioFramePosition length;
/*! @property framePosition
@abstract The position in the file at which the next read or write will occur.
@discussion
Set framePosition to perform a seek before a read or write. A read or write operation advances the frame position by the number of frames read or written.
*/
@property (nonatomic) AVAudioFramePosition framePosition;
@end
NS_ASSUME_NONNULL_END
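/* Example (an illustrative sketch, not part of the original header): reading a whole
file into a single buffer in its processing format. The path is hypothetical.
<pre>
NSURL *url = [NSURL fileURLWithPath:@"/tmp/example.caf"];
NSError *error = nil;
AVAudioFile *file = [[AVAudioFile alloc] initForReading:url error:&error];
if (file) {
    AVAudioPCMBuffer *buffer =
        [[AVAudioPCMBuffer alloc] initWithPCMFormat:file.processingFormat
                                      frameCapacity:(AVAudioFrameCount)file.length];
    if ([file readIntoBuffer:buffer error:&error]) {
        // buffer.frameLength holds the number of frames actually read.
        NSLog(@"read %u frames at %.0f Hz", buffer.frameLength, file.processingFormat.sampleRate);
    }
}
</pre>
*/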

View file

@ -1,231 +0,0 @@
/*
File: AVAudioFormat.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#import <AVFAudio/AVAudioChannelLayout.h>
#if __has_include(<CoreMedia/CMFormatDescription.h>)
#define AVAUDIOFORMAT_HAVE_CMFORMATDESCRIPTION 1
#import <CoreMedia/CMFormatDescription.h>
#endif
NS_ASSUME_NONNULL_BEGIN
/*!
@enum AVAudioCommonFormat
@constant AVAudioOtherFormat
A format other than one of the common ones below.
@constant AVAudioPCMFormatFloat32
Native-endian floats (this is the standard format).
@constant AVAudioPCMFormatFloat64
Native-endian doubles.
@constant AVAudioPCMFormatInt16
Signed 16-bit native-endian integers.
@constant AVAudioPCMFormatInt32
Signed 32-bit native-endian integers.
*/
typedef NS_ENUM(NSUInteger, AVAudioCommonFormat) {
AVAudioOtherFormat = 0,
AVAudioPCMFormatFloat32 = 1,
AVAudioPCMFormatFloat64 = 2,
AVAudioPCMFormatInt16 = 3,
AVAudioPCMFormatInt32 = 4
} NS_ENUM_AVAILABLE(10_10, 8_0);
/*! @class AVAudioFormat
@abstract A representation of an audio format.
@discussion
AVAudioFormat wraps a Core Audio AudioStreamBasicDescription struct, with convenience
initializers and accessors for common formats, including Core Audio's standard deinterleaved
32-bit floating point.
Instances of this class are immutable.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioFormat : NSObject <NSSecureCoding> {
@private
AudioStreamBasicDescription _asbd;
AVAudioChannelLayout *_layout;
AVAudioCommonFormat _commonFormat;
void * _reserved;
}
/*! @method initWithStreamDescription:
@abstract Initialize from an AudioStreamBasicDescription.
@param asbd
the AudioStreamBasicDescription
@discussion
If the format specifies more than 2 channels, this method fails (returns nil).
*/
- (nullable instancetype)initWithStreamDescription:(const AudioStreamBasicDescription *)asbd;
/*! @method initWithStreamDescription:channelLayout:
@abstract Initialize from an AudioStreamBasicDescription and optional channel layout.
@param asbd
the AudioStreamBasicDescription
@param layout
the channel layout. Can be nil only if asbd specifies 1 or 2 channels.
@discussion
If the format specifies more than 2 channels, this method fails (returns nil) unless layout
is non-nil.
*/
- (nullable instancetype)initWithStreamDescription:(const AudioStreamBasicDescription *)asbd channelLayout:(AVAudioChannelLayout * __nullable)layout;
/*! @method initStandardFormatWithSampleRate:channels:
@abstract Initialize to deinterleaved float with the specified sample rate and channel count.
@param sampleRate
the sample rate
@param channels
the channel count
@discussion
If the format specifies more than 2 channels, this method fails (returns nil).
*/
- (nullable instancetype)initStandardFormatWithSampleRate:(double)sampleRate channels:(AVAudioChannelCount)channels;
/*! @method initStandardFormatWithSampleRate:channelLayout:
@abstract Initialize to deinterleaved float with the specified sample rate and channel layout.
@param sampleRate
the sample rate
@param layout
the channel layout. must not be nil.
*/
- (instancetype)initStandardFormatWithSampleRate:(double)sampleRate channelLayout:(AVAudioChannelLayout *)layout;
/*! @method initWithCommonFormat:sampleRate:channels:interleaved:
@abstract Initialize to float with the specified sample rate, channel count and interleavedness.
@param format
the common format type
@param sampleRate
the sample rate
@param channels
the channel count
@param interleaved
true if interleaved
@discussion
If the format specifies more than 2 channels, this method fails (returns nil).
*/
- (nullable instancetype)initWithCommonFormat:(AVAudioCommonFormat)format sampleRate:(double)sampleRate channels:(AVAudioChannelCount)channels interleaved:(BOOL)interleaved;
/*! @method initWithCommonFormat:sampleRate:interleaved:channelLayout:
@abstract Initialize to float with the specified sample rate, channel layout and interleavedness.
@param format
the common format type
@param sampleRate
the sample rate
@param interleaved
true if interleaved
@param layout
the channel layout. must not be nil.
*/
- (instancetype)initWithCommonFormat:(AVAudioCommonFormat)format sampleRate:(double)sampleRate interleaved:(BOOL)interleaved channelLayout:(AVAudioChannelLayout *)layout;
/*! @method initWithSettings:
@abstract Initialize using a settings dictionary.
@discussion
See AVAudioSettings.h. Note that many settings dictionary elements pertain to encoder
settings, not the basic format, and will be ignored.
Returns nil if a format cannot be constructed with the provided settings, e.g. when:
- AVNumberOfChannelsKey specifies more than 2 channels, but AVChannelLayoutKey hasn't
been specified or the layout does not match
- AVLinearPCMBitDepthKey for linear PCM format specifies less than 8 or greater
than 32 bits
- values for the keys are not of the expected types
*/
- (nullable instancetype)initWithSettings:(NSDictionary<NSString *, id> *)settings;
#if AVAUDIOFORMAT_HAVE_CMFORMATDESCRIPTION
/*!
@method initWithCMAudioFormatDescription:
@abstract initialize from a CMAudioFormatDescriptionRef.
@param formatDescription
the CMAudioFormatDescriptionRef.
@discussion
If formatDescription is invalid, this method fails (returns nil).
*/
- (instancetype)initWithCMAudioFormatDescription:(CMAudioFormatDescriptionRef)formatDescription NS_AVAILABLE(10_11, 9_0);
#endif
/*! @method isEqual:
@abstract Determine whether another format is functionally equivalent.
@param object
the format to compare against
@discussion
For PCM, interleavedness is ignored for mono. Differences in the AudioStreamBasicDescription
alignment and packedness are ignored when they are not significant (e.g. with 1 channel, 2
bytes per frame and 16 bits per channel, alignment does not matter: the format is implicitly
packed and can be interpreted as either high- or low-aligned).
For AVAudioChannelLayout, a layout with standard mono/stereo tag is considered to be
equivalent to a nil layout. Otherwise, the layouts are compared for equality.
*/
- (BOOL)isEqual:(id)object;
/*! @property standard
@abstract Describes whether the format is deinterleaved native-endian float.
*/
@property (nonatomic, readonly, getter=isStandard) BOOL standard;
/*! @property commonFormat
@abstract An `AVAudioCommonFormat` identifying the format
*/
@property (nonatomic, readonly) AVAudioCommonFormat commonFormat;
/*! @property channelCount
@abstract The number of channels of audio data.
*/
@property (nonatomic, readonly) AVAudioChannelCount channelCount;
/*! @property sampleRate
@abstract A sampling rate in Hertz.
*/
@property (nonatomic, readonly) double sampleRate;
/*! @property interleaved
@abstract Describes whether the samples are interleaved.
@discussion
For non-PCM formats, the value is undefined.
*/
@property (nonatomic, readonly, getter=isInterleaved) BOOL interleaved;
/*! @property streamDescription
@abstract Returns the AudioStreamBasicDescription, for use with lower-level audio API's.
*/
@property (nonatomic, readonly) const AudioStreamBasicDescription *streamDescription;
/*! @property channelLayout
@abstract The underlying AVAudioChannelLayout, if any.
@discussion
Only formats with more than 2 channels are required to have channel layouts.
*/
@property (nonatomic, readonly, nullable) AVAudioChannelLayout *channelLayout;
/*! @property magicCookie
@abstract The underlying magic cookie, if any.
@discussion
A magic cookie contains metadata associated with encoders and decoders.
Encoders produce a magic cookie, and some decoders require a magic cookie to decode properly.
*/
@property (nonatomic, retain, nullable) NSData *magicCookie API_AVAILABLE(macos(10.12), ios(10.0), watchos(3.0), tvos(10.0));
/*! @property settings
@abstract Returns the format represented as a dictionary with keys from AVAudioSettings.h.
*/
@property (nonatomic, readonly) NSDictionary<NSString *, id> *settings;
#if AVAUDIOFORMAT_HAVE_CMFORMATDESCRIPTION
/*!
@property formatDescription
@abstract Converts to a CMAudioFormatDescriptionRef, for use with Core Media API's.
*/
@property (nonatomic, readonly) CMAudioFormatDescriptionRef formatDescription NS_AVAILABLE(10_11, 9_0);
#endif
@end
NS_ASSUME_NONNULL_END
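/* Example (an illustrative sketch, not part of the original header): three common
ways to construct formats; the rates, channel counts and layout tag are arbitrary.
<pre>
// The "standard" format: deinterleaved native-endian float.
AVAudioFormat *standard =
    [[AVAudioFormat alloc] initStandardFormatWithSampleRate:48000.0 channels:2];

// Interleaved 16-bit integer, as found in many WAV files.
AVAudioFormat *int16 =
    [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatInt16
                                     sampleRate:48000.0
                                       channels:2
                                    interleaved:YES];

// More than 2 channels requires an explicit channel layout.
AVAudioChannelLayout *layout =
    [[AVAudioChannelLayout alloc] initWithLayoutTag:kAudioChannelLayoutTag_AudioUnit_5_0];
AVAudioFormat *surround =
    [[AVAudioFormat alloc] initStandardFormatWithSampleRate:48000.0 channelLayout:layout];
NSLog(@"%u channels, standard: %d", surround.channelCount, surround.isStandard);
</pre>
*/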

View file

@ -1,151 +0,0 @@
/*
File: AVAudioIONode.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioNode.h>
#import <AVFAudio/AVAudioMixing.h>
#if __has_include(<AudioToolbox/AudioUnit.h>)
#define AVAUDIOIONODE_HAVE_AUDIOUNIT 1
#import <AudioToolbox/AudioUnit.h>
#endif
NS_ASSUME_NONNULL_BEGIN
/*! @typedef AVAudioIONodeInputBlock
@abstract
A block which will be called by AVAudioEngine's render call when operating in the manual
rendering mode, to get input data as needed.
@param inNumberOfFrames
The number of frames required to complete the request. You may supply either this many
frames or none.
@return
An AudioBufferList containing data to be rendered, or null if no data is available.
The data in the returned buffer must not be cleared or re-filled until the input block is
called again or the rendering has finished.
The format of the returned buffer must match the format specified when registering the
block.
@discussion
If you are out of data and return null or fewer than the requested number of frames, that
data will not be used for rendering. The engine will try to render from other active
sources in the processing graph, and will inform about the input node's status in the error
returned from its render call.
Note that when the engine is configured to operate in
`AVAudioEngineManualRenderingModeRealtime`, this block will be called from a realtime
context. Care should be taken not to make any blocking call (e.g. calling libdispatch,
blocking on a mutex, allocating memory etc.) which may cause an overload at the lower layers.
*/
typedef const AudioBufferList * __nullable (^AVAudioIONodeInputBlock)(AVAudioFrameCount inNumberOfFrames) API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @class AVAudioIONode
@abstract
Base class for a node that performs audio input or output in the engine.
@discussion
When the engine is configured to render to/from an audio device, on macOS, AVAudioInputNode
and AVAudioOutputNode communicate with the system's default input and output devices.
On iOS, they communicate with the devices appropriate to the app's AVAudioSession category
and other configuration, also considering the user's actions such as
connecting/disconnecting external devices.
In the manual rendering mode, the AVAudioInputNode and AVAudioOutputNode perform the input
and output in the engine, in response to the client's requests.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioIONode : AVAudioNode
/*! @property presentationLatency
@abstract
The presentation or hardware latency, applicable when the engine is rendering to/from an
audio device.
@discussion
This corresponds to kAudioDevicePropertyLatency and kAudioStreamPropertyLatency.
See <CoreAudio/AudioHardwareBase.h>.
*/
@property (nonatomic, readonly) NSTimeInterval presentationLatency;
#if AVAUDIOIONODE_HAVE_AUDIOUNIT
/*! @property audioUnit
@abstract
The node's underlying AudioUnit, if any.
@discussion
This is only necessary for certain advanced usages.
*/
@property (nonatomic, readonly, nullable) AudioUnit audioUnit;
#endif
@end
/*! @class AVAudioInputNode
@abstract
A node that performs audio input in the engine.
@discussion
When the engine is rendering to/from an audio device, this node connects to the system's
audio input.
When the engine is operating in manual rendering mode, this node can be used to supply
the input data to the engine.
This node has one element.
The format of the input scope reflects:
- the audio hardware sample rate and channel count, when connected to the hardware
- the format of the PCM audio data that the node will supply to the engine, in the
manual rendering mode (see `setManualRenderingInputPCMFormat:inputBlock:`)
When rendering from an audio device, the input node does not support format conversion.
Hence the format of the output scope must be the same as that of the input, as well as the
formats for all the nodes connected in the input node chain.
In the manual rendering mode, the format of the output scope is initially the same as that
of the input, but you may set it to a different format, in which case the node will convert.
*/
API_AVAILABLE(macos(10.10), ios(8.0), watchos(4.0), tvos(11.0))
@interface AVAudioInputNode : AVAudioIONode <AVAudioMixing>
- (instancetype)init NS_UNAVAILABLE; // fetch instance via -[AVAudioEngine inputNode].
/*! @method setManualRenderingInputPCMFormat:inputBlock:
@abstract
Supply the data through the input node to the engine operating in the manual rendering mode.
@param format
The format of the PCM audio data the block will supply to the engine.
@param block
The block the engine will call on the input node to get the audio to send to the output,
when operating in the manual rendering mode. See `AVAudioIONodeInputBlock` for more details.
@return
YES for success.
@discussion
This block must be set if the input node is being used when the engine is operating in
manual rendering mode.
Switching the engine to render to/from an audio device invalidates any previously set block,
and makes this method ineffective.
*/
- (BOOL)setManualRenderingInputPCMFormat:(AVAudioFormat *)format inputBlock:(AVAudioIONodeInputBlock)block API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
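/* E.g. an illustrative sketch, assuming `engine` has already been put into a manual
   rendering mode and `inputBlock` is an AVAudioIONodeInputBlock defined by the client:
   <pre>
   AVAudioFormat *inputFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0 channels:2];
   if (![engine.inputNode setManualRenderingInputPCMFormat:inputFormat inputBlock:inputBlock]) {
       // likely the engine is rendering to/from an audio device rather than manually
   }
   </pre>
*/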
@end
/*! @class AVAudioOutputNode
@abstract
A node that performs audio output in the engine.
@discussion
When the engine is rendering to/from an audio device, this node connects to the system's
audio output.
When the engine is operating in manual rendering mode, this node performs output in
response to the client's requests.
This node has one element.
The format of the output scope reflects:
- the audio hardware sample rate and channel count, when connected to the hardware
- the engine's manual rendering mode output format (see
`AVAudioEngine(manualRenderingFormat)`), in the manual rendering mode
The format of the input scope is initially the same as that of the
output, but you may set it to a different format, in which case the node will convert.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioOutputNode : AVAudioIONode
- (instancetype)init NS_UNAVAILABLE; // fetch instance via -[AVAudioEngine outputNode].
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,43 +0,0 @@
/*
File: AVAudioMixerNode.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioNode.h>
#import <AVFAudio/AVAudioMixing.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioMixerNode
@abstract A node that mixes its inputs to a single output.
@discussion
Mixers may have any number of inputs.
The mixer accepts input at any sample rate and efficiently combines sample rate
conversions. It also accepts any channel count and will correctly upmix or downmix
to the output channel count.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioMixerNode : AVAudioNode <AVAudioMixing>
- (instancetype)init NS_DESIGNATED_INITIALIZER;
/*! @property outputVolume
@abstract The mixer's output volume.
@discussion
This accesses the mixer's output volume (0.0-1.0, inclusive).
*/
@property (nonatomic) float outputVolume;
/*! @property nextAvailableInputBus
@abstract Find an unused input bus.
@discussion
This will find and return the first input bus to which no other node is connected.
*/
@property (nonatomic, readonly) AVAudioNodeBus nextAvailableInputBus;
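/* E.g. an illustrative sketch of connecting a player to the first free input bus of the
   engine's main mixer node:
   <pre>
   AVAudioEngine *engine = [[AVAudioEngine alloc] init];
   AVAudioPlayerNode *player = [[AVAudioPlayerNode alloc] init];
   AVAudioMixerNode *mixer = engine.mainMixerNode;
   [engine attachNode:player];
   [engine connect:player to:mixer fromBus:0 toBus:mixer.nextAvailableInputBus format:nil];
   </pre>
*/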
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,253 +0,0 @@
/*
File: AVAudioMixing.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAudioNode, AVAudioConnectionPoint, AVAudioMixingDestination;
@protocol AVAudioStereoMixing;
@protocol AVAudio3DMixing;
/*! @protocol AVAudioMixing
@abstract Protocol that defines properties applicable to the input bus of a mixer
node
@discussion
Nodes that conform to the AVAudioMixing protocol can talk to a mixer node downstream,
specifically of type AVAudioMixerNode or AVAudioEnvironmentNode. The properties defined
by this protocol apply to the respective input bus of the mixer node that the source node is
connected to. Note that effect nodes cannot talk to their downstream mixer.
Properties can be set either on the source node, or directly on individual mixer connections.
Source node properties are:
- applied to all existing mixer connections when set
- applied to new mixer connections
- preserved upon disconnection from mixers
- not affected by connections/disconnections to/from mixers
- not affected by any direct changes to properties on individual mixer connections
Individual mixer connection properties, when set, will override any values previously derived
from the corresponding source node properties. However, if a source node property is
subsequently set, it will override the corresponding property value of all individual mixer
connections.
Unlike source node properties, individual mixer connection properties are not preserved upon
disconnection (see `AVAudioMixing(destinationForMixer:bus:)` and `AVAudioMixingDestination`).
Source nodes that are connected to a mixer downstream can be disconnected from
one mixer and connected to another mixer with source node's mixing settings intact.
For example, an AVAudioPlayerNode that is being used in a gaming scenario can set up its
3D mixing settings and then move from one environment to another.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@protocol AVAudioMixing <AVAudioStereoMixing, AVAudio3DMixing>
/*! @method destinationForMixer:bus:
@abstract Returns the AVAudioMixingDestination object corresponding to specified mixer node and
its input bus
@discussion
When a source node is connected to multiple mixers downstream, setting AVAudioMixing
properties directly on the source node will apply the change to all the mixers downstream.
If you want to set/get properties on a specific mixer, use this method to get the
corresponding AVAudioMixingDestination and set/get properties on it.
Note:
- Properties set on individual AVAudioMixingDestination instances will not reflect at the
source node level.
- The AVAudioMixingDestination reference returned by this method can become invalid when
there is any disconnection between the source and the mixer node. Hence this reference
should not be retained; fetch it again every time you want to set/get properties
on a specific mixer.
If the source node is not connected to the specified mixer/input bus, this method
returns nil.
Calling this on an AVAudioMixingDestination instance returns self if the specified
mixer/input bus match its connection point, otherwise returns nil.
*/
- (nullable AVAudioMixingDestination *)destinationForMixer:(AVAudioNode *)mixer bus:(AVAudioNodeBus)bus NS_AVAILABLE(10_11, 9_0);
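/* E.g. an illustrative sketch, assuming `player` is connected to input bus 0 of the engine's
   main mixer; the destination is fetched fresh rather than retained:
   <pre>
   AVAudioMixingDestination *dest = [player destinationForMixer:engine.mainMixerNode bus:0];
   if (dest) {
       dest.volume = 0.25; // affects only this mixer connection
   }
   </pre>
*/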
/*! @property volume
@abstract Set a bus's input volume
@discussion
Range: 0.0 -> 1.0
Default: 1.0
Mixers: AVAudioMixerNode, AVAudioEnvironmentNode
*/
@property (nonatomic) float volume;
@end
/*! @protocol AVAudioStereoMixing
@abstract Protocol that defines stereo mixing properties
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@protocol AVAudioStereoMixing <NSObject>
/*! @property pan
@abstract Set a bus's stereo pan
@discussion
Range: -1.0 -> 1.0
Default: 0.0
Mixer: AVAudioMixerNode
*/
@property (nonatomic) float pan;
@end
/*! @enum AVAudio3DMixingRenderingAlgorithm
@abstract Types of rendering algorithms available per input bus of the environment node
@discussion
The rendering algorithms differ in terms of quality and CPU cost.
AVAudio3DMixingRenderingAlgorithmEqualPowerPanning is the simplest panning algorithm and also
the least expensive computationally.
With the exception of AVAudio3DMixingRenderingAlgorithmSoundField, while the mixer is
rendering to multichannel hardware, audio data will only be rendered to channels 1 & 2.
AVAudio3DMixingRenderingAlgorithmEqualPowerPanning
EqualPowerPanning merely pans the data of the mixer bus into a stereo field. This
algorithm is analogous to the pan knob found on a mixing board channel strip.
AVAudio3DMixingRenderingAlgorithmSphericalHead
SphericalHead is designed to emulate 3-dimensional space in headphones by simulating
inter-aural time delays and other spatial cues. SphericalHead is slightly less CPU
intensive than the HRTF algorithm.
AVAudio3DMixingRenderingAlgorithmHRTF
HRTF (Head Related Transfer Function) is a high quality algorithm using filtering to
emulate 3-dimensional space in headphones. HRTF is a CPU-intensive algorithm.
AVAudio3DMixingRenderingAlgorithmHRTFHQ
Higher quality HRTF rendering algorithm compared to AVAudio3DMixingRenderingAlgorithmHRTF.
Improvements have been made to the overall frequency response and localization of
sources in a 3D space.
AVAudio3DMixingRenderingAlgorithmSoundField
SoundField is designed for rendering to multichannel hardware. The mixer takes data
being rendered with SoundField and distributes it amongst all the output channels with
a weighting toward the location from which the sound derives. It is very effective for
ambient sounds, which may derive from a specific location in space, yet should be heard
through the listener's entire space.
AVAudio3DMixingRenderingAlgorithmStereoPassThrough
StereoPassThrough should be used when no localization is desired for the source data.
Setting this algorithm tells the mixer to take mono/stereo input and pass it directly to
channels 1 & 2 without localization.
*/
typedef NS_ENUM(NSInteger, AVAudio3DMixingRenderingAlgorithm) {
AVAudio3DMixingRenderingAlgorithmEqualPowerPanning = 0,
AVAudio3DMixingRenderingAlgorithmSphericalHead = 1,
AVAudio3DMixingRenderingAlgorithmHRTF = 2,
AVAudio3DMixingRenderingAlgorithmSoundField = 3,
AVAudio3DMixingRenderingAlgorithmStereoPassThrough = 5,
AVAudio3DMixingRenderingAlgorithmHRTFHQ = 6
} NS_ENUM_AVAILABLE(10_10, 8_0);
/*! @protocol AVAudio3DMixing
@abstract Protocol that defines 3D mixing properties
*/
@protocol AVAudio3DMixing <NSObject>
/*! @property renderingAlgorithm
@abstract Type of rendering algorithm used
@discussion
Depending on the current output format of the AVAudioEnvironmentNode, only a subset of the
rendering algorithms may be supported. An array of valid rendering algorithms can be
retrieved by calling applicableRenderingAlgorithms on AVAudioEnvironmentNode.
Default: AVAudio3DMixingRenderingAlgorithmEqualPowerPanning
Mixer: AVAudioEnvironmentNode
*/
@property (nonatomic) AVAudio3DMixingRenderingAlgorithm renderingAlgorithm __WATCHOS_PROHIBITED;
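/* E.g. an illustrative sketch, assuming `player` is connected to `environmentNode`: prefer
   HRTF when the environment node's current output format supports it:
   <pre>
   NSArray<NSNumber *> *valid = [environmentNode applicableRenderingAlgorithms];
   if ([valid containsObject:@(AVAudio3DMixingRenderingAlgorithmHRTF)])
       player.renderingAlgorithm = AVAudio3DMixingRenderingAlgorithmHRTF;
   </pre>
*/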
/*! @property rate
@abstract Changes the playback rate of the input signal
@discussion
A value of 2.0 results in the output audio playing one octave higher.
A value of 0.5 results in the output audio playing one octave lower.
Range: 0.5 -> 2.0
Default: 1.0
Mixer: AVAudioEnvironmentNode
*/
@property (nonatomic) float rate __WATCHOS_PROHIBITED;
/*! @property reverbBlend
@abstract Controls the blend of dry and reverb processed audio
@discussion
This property controls the amount of the source's audio that will be processed by the reverb
in AVAudioEnvironmentNode. A value of 0.5 will result in an equal blend of dry and processed
(wet) audio.
Range: 0.0 (completely dry) -> 1.0 (completely wet)
Default: 0.0
Mixer: AVAudioEnvironmentNode
*/
@property (nonatomic) float reverbBlend __WATCHOS_PROHIBITED;
/*! @property obstruction
@abstract Simulates filtering of the direct path of sound due to an obstacle
@discussion
Only the direct path of sound between the source and listener is blocked.
Range: -100.0 -> 0.0 dB
Default: 0.0
Mixer: AVAudioEnvironmentNode
*/
@property (nonatomic) float obstruction __WATCHOS_PROHIBITED;
/*! @property occlusion
@abstract Simulates filtering of the direct and reverb paths of sound due to an obstacle
@discussion
Both the direct and reverb paths of sound between the source and listener are blocked.
Range: -100.0 -> 0.0 dB
Default: 0.0
Mixer: AVAudioEnvironmentNode
*/
@property (nonatomic) float occlusion __WATCHOS_PROHIBITED;
/*! @property position
@abstract The location of the source in the 3D environment
@discussion
The coordinates are specified in meters.
Mixer: AVAudioEnvironmentNode
*/
@property (nonatomic) AVAudio3DPoint position __WATCHOS_PROHIBITED;
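/* E.g. an illustrative sketch, assuming `player` feeds an AVAudioEnvironmentNode whose
   listener has the default orientation (-z points ahead):
   <pre>
   player.position = AVAudioMake3DPoint(2.0, 0.0, -5.0); // 2 m right, 5 m ahead
   player.reverbBlend = 0.3; // mostly dry, some reverb
   </pre>
*/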
@end
/*! @class AVAudioMixingDestination
@abstract An object representing a connection to a mixer node from a node that
conforms to AVAudioMixing protocol
@discussion
A standalone instance of AVAudioMixingDestination cannot be created.
Only an instance vended by a source node (e.g. AVAudioPlayerNode) can be used
(see `AVAudioMixing`).
*/
NS_CLASS_AVAILABLE(10_11, 9_0)
@interface AVAudioMixingDestination : NSObject <AVAudioMixing> {
@private
void *_impl;
}
- (instancetype)init NS_UNAVAILABLE;
/*! @property connectionPoint
@abstract Returns the underlying mixer connection point
*/
@property (nonatomic, readonly) AVAudioConnectionPoint *connectionPoint;
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,202 +0,0 @@
/*
File: AVAudioNode.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#if __has_include(<AudioToolbox/AudioUnit.h>) && __OBJC2__
#define AVAUDIONODE_HAVE_AUAUDIOUNIT 1
#import <AudioToolbox/AudioUnit.h>
#endif
NS_ASSUME_NONNULL_BEGIN
@class AVAudioEngine, AVAudioFormat, AVAudioInputNode, AVAudioMixerNode, AVAudioOutputNode, AVAudioPCMBuffer, AVAudioTime;
#if AVAUDIONODE_HAVE_AUAUDIOUNIT
@class AUAudioUnit;
#endif
/*! @typedef AVAudioNodeTapBlock
@abstract A block that receives copies of the output of an AVAudioNode.
@param buffer
a buffer of audio captured from the output of an AVAudioNode
@param when
the time at which the buffer was captured
@discussion
CAUTION: This callback may be invoked on a thread other than the main thread.
*/
typedef void (^AVAudioNodeTapBlock)(AVAudioPCMBuffer *buffer, AVAudioTime *when);
/*!
@class AVAudioNode
@abstract Base class for an audio generation, processing, or I/O block.
@discussion
`AVAudioEngine` objects contain instances of various AVAudioNode subclasses. This
base class provides certain common functionality.
Nodes have input and output busses, which can be thought of as connection points.
For example, an effect typically has one input bus and one output bus. A mixer
typically has multiple input busses and one output bus.
Busses have formats, expressed in terms of sample rate and channel count. When making
connections between nodes, often the format must match exactly. There are exceptions
(e.g. `AVAudioMixerNode` and `AVAudioOutputNode`).
Nodes do not currently provide useful functionality until attached to an engine.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioNode : NSObject {
@protected
void *_impl;
}
/*! @method reset
@abstract Clear a unit's previous processing state.
*/
- (void)reset;
/*! @method inputFormatForBus:
@abstract Obtain an input bus's format.
*/
- (AVAudioFormat *)inputFormatForBus:(AVAudioNodeBus)bus;
/*! @method outputFormatForBus:
@abstract Obtain an output bus's format.
*/
- (AVAudioFormat *)outputFormatForBus:(AVAudioNodeBus)bus;
/*! @method nameForInputBus:
@abstract Return the name of an input bus.
*/
- (nullable NSString *)nameForInputBus:(AVAudioNodeBus)bus;
/*! @method nameForOutputBus:
@abstract Return the name of an output bus.
*/
- (nullable NSString *)nameForOutputBus:(AVAudioNodeBus)bus;
/*! @method installTapOnBus:bufferSize:format:block:
@abstract Create a "tap" to record/monitor/observe the output of the node.
@param bus
the node output bus to which to attach the tap
@param bufferSize
the requested size of the incoming buffers in sample frames. Supported range is [100, 400] ms.
@param format
If non-nil, attempts to apply this as the format of the specified output bus. This should
only be done when attaching to an output bus which is not connected to another node; an
error will result otherwise.
The tap and connection formats (if non-nil) on the specified bus should be identical.
Otherwise, the latter operation will override any previously set format.
@param tapBlock
a block to be called with audio buffers
@discussion
Only one tap may be installed on any bus. Taps may be safely installed and removed while
the engine is running.
Note that if you have a tap installed on AVAudioOutputNode, there could be a mismatch
between the tap buffer format and AVAudioOutputNode's output format, depending on the
underlying physical device. Hence, instead of tapping the AVAudioOutputNode, it is
advised to tap the node connected to it.
E.g. to capture audio from input node:
<pre>
AVAudioEngine *engine = [[AVAudioEngine alloc] init];
AVAudioInputNode *input = [engine inputNode];
AVAudioFormat *format = [input outputFormatForBus: 0];
[input installTapOnBus: 0 bufferSize: 8192 format: format block: ^(AVAudioPCMBuffer *buf, AVAudioTime *when) {
// 'buf' contains audio captured from input node at time 'when'
}];
....
// start engine
</pre>
*/
- (void)installTapOnBus:(AVAudioNodeBus)bus bufferSize:(AVAudioFrameCount)bufferSize format:(AVAudioFormat * __nullable)format block:(AVAudioNodeTapBlock)tapBlock;
/*! @method removeTapOnBus:
@abstract Destroy a tap.
@param bus
the node output bus whose tap is to be destroyed
*/
- (void)removeTapOnBus:(AVAudioNodeBus)bus;
/*! @property engine
@abstract The engine to which the node is attached (or nil).
*/
@property (nonatomic, readonly, nullable) AVAudioEngine *engine;
/*! @property numberOfInputs
@abstract The node's number of input busses.
*/
@property (nonatomic, readonly) NSUInteger numberOfInputs;
/*! @property numberOfOutputs
@abstract The node's number of output busses.
*/
@property (nonatomic, readonly) NSUInteger numberOfOutputs;
/*! @property lastRenderTime
@abstract Obtain the time for which the node most recently rendered.
@discussion
Will return nil if the engine is not running or if the node is not connected to an input or
output node.
*/
@property (nonatomic, readonly, nullable) AVAudioTime *lastRenderTime;
#if AVAUDIONODE_HAVE_AUAUDIOUNIT
/*! @property AUAudioUnit
@abstract An AUAudioUnit wrapping or underlying the implementation's AudioUnit.
@discussion
This provides an AUAudioUnit which either wraps or underlies the implementation's
AudioUnit, depending on how that audio unit is packaged. Applications can interact with this
AUAudioUnit to control custom properties, select presets, change parameters, etc.
No operations that may conflict with state maintained by the engine should be performed
directly on the audio unit. These include changing initialization state, stream formats,
channel layouts or connections to other audio units.
*/
@property (nonatomic, readonly) AUAudioUnit *AUAudioUnit API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
#endif // AVAUDIONODE_HAVE_AUAUDIOUNIT
/*! @property latency
@abstract The processing latency of the node, in seconds.
@discussion
This property reflects the delay between when an impulse in the audio stream arrives at the
input vs. output of the node. This should reflect the delay due to signal processing
(e.g. filters, FFTs, etc.), not delay or reverberation which is being applied as an effect.
A value of zero indicates either no latency or an unknown latency.
*/
@property (nonatomic, readonly) NSTimeInterval latency API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @property outputPresentationLatency
@abstract The maximum render pipeline latency downstream of the node, in seconds.
@discussion
This describes the maximum time it will take for the audio at the output of a node to be
presented.
For instance, the output presentation latency of the output node in the engine is:
- zero in manual rendering mode
- the presentation latency of the device itself when rendering to an audio device
(see `AVAudioIONode(presentationLatency)`)
The output presentation latency of a node connected directly to the output node is the
output node's presentation latency plus the output node's processing latency (see `latency`).
For a node which is exclusively in the input node chain (i.e. not connected to engine's
output node), this property reflects the latency for the output of this node to be
presented at the output of the terminating node in the input chain.
A value of zero indicates either an unknown or no latency.
Note that this latency value can change as the engine is reconfigured (started/stopped,
connections made/altered downstream of this node etc.). So it is recommended not to cache
this value, but to fetch it whenever it's needed.
*/
@property (nonatomic, readonly) NSTimeInterval outputPresentationLatency API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,150 +0,0 @@
/*
File: AVAudioPlayer.h
Framework: AVFoundation
Copyright 2008-2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVAudioFormat.h>
#import <Foundation/Foundation.h>
#import <AVFAudio/AVAudioSettings.h>
#if (TARGET_OS_IPHONE && __has_include(<AVFoundation/AVAudioSession.h>))
#import <AVFAudio/AVAudioSession.h>
#endif // #if (TARGET_OS_IPHONE && __has_include(<AVFoundation/AVAudioSession.h>))
#import <Availability.h>
NS_ASSUME_NONNULL_BEGIN
@class NSData, NSURL, NSError;
#if (TARGET_OS_IPHONE && __has_include(<AVFoundation/AVAudioSession.h>))
@class AVAudioSessionChannelDescription;
#endif
@protocol AVAudioPlayerDelegate;
NS_CLASS_AVAILABLE(10_7, 2_2) __WATCHOS_AVAILABLE(3_0)
@interface AVAudioPlayer : NSObject {
@private
id _impl;
}
/* For all of these init calls, if nil is returned you can check outError to see what the
problem was. If the result is not nil, the object is usable for playing.
*/
/* all data must be in the form of an audio file understood by CoreAudio */
- (nullable instancetype)initWithContentsOfURL:(NSURL *)url error:(NSError **)outError;
- (nullable instancetype)initWithData:(NSData *)data error:(NSError **)outError;
/* The file type hint is a constant defined in AVMediaFormat.h whose value is a UTI for a file format. e.g. AVFileTypeAIFF. */
/* Sometimes the type of a file cannot be determined from the data, or it is actually corrupt. The file type hint tells the parser what kind of data to look for so that files which are not self identifying or possibly even corrupt can be successfully parsed. */
- (nullable instancetype)initWithContentsOfURL:(NSURL *)url fileTypeHint:(NSString * __nullable)utiString error:(NSError **)outError NS_AVAILABLE(10_9, 7_0);
- (nullable instancetype)initWithData:(NSData *)data fileTypeHint:(NSString * __nullable)utiString error:(NSError **)outError NS_AVAILABLE(10_9, 7_0);
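/* E.g. an illustrative sketch (the resource name is hypothetical):
   <pre>
   NSError *error = nil;
   NSURL *url = [[NSBundle mainBundle] URLForResource:@"song" withExtension:@"m4a"];
   AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithContentsOfURL:url error:&error];
   if (player) {
       [player prepareToPlay];
       [player play];
   } else {
       NSLog(@"could not create player: %@", error);
   }
   </pre>
*/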
/* transport control */
/* methods that return BOOL return YES on success and NO on failure. */
- (BOOL)prepareToPlay; /* get ready to play the sound. happens automatically on play. */
- (BOOL)play; /* sound is played asynchronously. */
- (BOOL)playAtTime:(NSTimeInterval)time NS_AVAILABLE(10_7, 4_0); /* play a sound some time in the future. time is an absolute time based on and greater than deviceCurrentTime. */
- (void)pause; /* pauses playback, but remains ready to play. */
- (void)stop; /* stops playback. no longer ready to play. */
/* properties */
@property(readonly, getter=isPlaying) BOOL playing; /* is it playing or not? */
@property(readonly) NSUInteger numberOfChannels;
@property(readonly) NSTimeInterval duration; /* the duration of the sound. */
#if !TARGET_OS_IPHONE
/* the UID of the current audio device (as a string) */
@property(copy, nullable) NSString *currentDevice API_AVAILABLE(macos(10.13));
#endif
/* the delegate will be sent messages from the AVAudioPlayerDelegate protocol */
@property(assign, nullable) id<AVAudioPlayerDelegate> delegate;
/* one of these properties will be non-nil based on the init... method used */
@property(readonly, nullable) NSURL *url; /* returns nil if object was not created with a URL */
@property(readonly, nullable) NSData *data; /* returns nil if object was not created with a data object */
@property float pan NS_AVAILABLE(10_7, 4_0); /* set panning. -1.0 is left, 0.0 is center, 1.0 is right. */
@property float volume; /* The volume for the sound. The nominal range is from 0.0 to 1.0. */
- (void)setVolume:(float)volume fadeDuration:(NSTimeInterval)duration API_AVAILABLE(macos(10.12), ios(10.0), watchos(3.0), tvos(10.0)); /* fade to a new volume over a duration */
@property BOOL enableRate NS_AVAILABLE(10_8, 5_0); /* You must set enableRate to YES for the rate property to take effect. You must set this before calling prepareToPlay. */
@property float rate NS_AVAILABLE(10_8, 5_0); /* See enableRate. The playback rate for the sound. 1.0 is normal, 0.5 is half speed, 2.0 is double speed. */
/* If the sound is playing, currentTime is the offset into the sound of the current playback position.
If the sound is not playing, currentTime is the offset into the sound where playing would start. */
@property NSTimeInterval currentTime;
/* returns the current time associated with the output device */
@property(readonly) NSTimeInterval deviceCurrentTime NS_AVAILABLE(10_7, 4_0);
/* "numberOfLoops" is the number of times that the sound will return to the beginning upon reaching the end.
A value of zero means to play the sound just once.
A value of one will result in playing the sound twice, and so on.
Any negative number will loop indefinitely until stopped.
*/
@property NSInteger numberOfLoops;
/* settings */
@property(readonly) NSDictionary<NSString *, id> *settings NS_AVAILABLE(10_7, 4_0); /* returns a settings dictionary with keys as described in AVAudioSettings.h */
/* returns the format of the audio data */
@property(readonly) AVAudioFormat *format API_AVAILABLE(macos(10.12), ios(10.0), watchos(3.0), tvos(10.0));
/* metering */
@property(getter=isMeteringEnabled) BOOL meteringEnabled; /* turns level metering on or off. default is off. */
- (void)updateMeters; /* call to refresh meter values */
- (float)peakPowerForChannel:(NSUInteger)channelNumber; /* returns peak power in decibels for a given channel */
- (float)averagePowerForChannel:(NSUInteger)channelNumber; /* returns average power in decibels for a given channel */
#if (TARGET_OS_IPHONE && __has_include(<AVFoundation/AVAudioSession.h>))
/* The channels property lets you assign the output to play to specific channels as described by AVAudioSession's channels property */
/* This property is nil valued until set. */
/* The array must have the same number of channels as returned by the numberOfChannels property. */
@property(nonatomic, copy, nullable) NSArray<AVAudioSessionChannelDescription *> *channelAssignments NS_AVAILABLE(10_9, 7_0); /* Array of AVAudioSessionChannelDescription objects */
#endif
@end
/* A protocol for delegates of AVAudioPlayer */
__WATCHOS_AVAILABLE(3_0)
@protocol AVAudioPlayerDelegate <NSObject>
@optional
/* audioPlayerDidFinishPlaying:successfully: is called when a sound has finished playing. This method is NOT called if the player is stopped due to an interruption. */
- (void)audioPlayerDidFinishPlaying:(AVAudioPlayer *)player successfully:(BOOL)flag;
/* if an error occurs while decoding it will be reported to the delegate. */
- (void)audioPlayerDecodeErrorDidOccur:(AVAudioPlayer *)player error:(NSError * __nullable)error;
#if TARGET_OS_IPHONE
/* AVAudioPlayer INTERRUPTION NOTIFICATIONS ARE DEPRECATED - Use AVAudioSession instead. */
/* audioPlayerBeginInterruption: is called when the audio session has been interrupted while the player was playing. The player will have been paused. */
- (void)audioPlayerBeginInterruption:(AVAudioPlayer *)player NS_DEPRECATED_IOS(2_2, 8_0);
/* audioPlayerEndInterruption:withOptions: is called when the audio session interruption has ended and this player had been interrupted while playing. */
/* Currently the only flag is AVAudioSessionInterruptionFlags_ShouldResume. */
- (void)audioPlayerEndInterruption:(AVAudioPlayer *)player withOptions:(NSUInteger)flags NS_DEPRECATED_IOS(6_0, 8_0);
- (void)audioPlayerEndInterruption:(AVAudioPlayer *)player withFlags:(NSUInteger)flags NS_DEPRECATED_IOS(4_0, 6_0);
/* audioPlayerEndInterruption: is called when the preferred method, audioPlayerEndInterruption:withFlags:, is not implemented. */
- (void)audioPlayerEndInterruption:(AVAudioPlayer *)player NS_DEPRECATED_IOS(2_2, 6_0);
#endif // TARGET_OS_IPHONE
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,381 +0,0 @@
/*
File: AVAudioPlayerNode.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioNode.h>
#import <AVFAudio/AVAudioFile.h>
#import <AVFAudio/AVAudioMixing.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAudioTime;
/*!
@enum AVAudioPlayerNodeBufferOptions
@abstract Options controlling buffer scheduling.
@constant AVAudioPlayerNodeBufferLoops
The buffer loops indefinitely.
@constant AVAudioPlayerNodeBufferInterrupts
The buffer interrupts any buffer already playing.
@constant AVAudioPlayerNodeBufferInterruptsAtLoop
The buffer interrupts any buffer already playing, at its loop point.
*/
typedef NS_OPTIONS(NSUInteger, AVAudioPlayerNodeBufferOptions) {
AVAudioPlayerNodeBufferLoops = 1UL << 0, // 0x01
AVAudioPlayerNodeBufferInterrupts = 1UL << 1, // 0x02
AVAudioPlayerNodeBufferInterruptsAtLoop = 1UL << 2 // 0x04
} NS_AVAILABLE(10_10, 8_0);
/*!
@enum AVAudioPlayerNodeCompletionCallbackType
@abstract Specifies when the completion handler must be invoked.
@constant AVAudioPlayerNodeCompletionDataConsumed
The buffer or file data has been consumed by the player.
@constant AVAudioPlayerNodeCompletionDataRendered
The buffer or file data has been rendered (i.e. output) by the player. This
does not account for any signal processing latencies downstream of the player
in the engine (see `AVAudioNode(outputPresentationLatency)`).
@constant AVAudioPlayerNodeCompletionDataPlayedBack
Applicable only when the engine is rendering to/from an audio device.
The buffer or file has finished playing. This accounts for both (small) signal
processing latencies downstream of the player in the engine, as well as
(possibly significant) latency in the audio playback device.
*/
typedef NS_ENUM(NSInteger, AVAudioPlayerNodeCompletionCallbackType) {
AVAudioPlayerNodeCompletionDataConsumed = 0,
AVAudioPlayerNodeCompletionDataRendered = 1,
AVAudioPlayerNodeCompletionDataPlayedBack = 2,
} API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @typedef AVAudioPlayerNodeCompletionHandler
@abstract Buffer or file completion callback handler.
@param callbackType
Indicates the type of buffer or file completion when the callback is invoked.
@discussion
AVAudioPlayerNode issues this callback to inform the client about the specific type of
buffer or file completion. See `AVAudioPlayerNodeCompletionCallbackType` for more details.
Note that the `AVAudioNodeCompletionHandler` callback from some of the player's scheduling
methods (e.g. `scheduleBuffer:completionHandler:`) is equivalent to the
AVAudioPlayerNodeCompletionHandler callback for `AVAudioPlayerNodeCompletionDataConsumed`.
In general the callbacks arrive on a non-main thread and it is the client's responsibility
to handle them in a thread-safe manner.
*/
typedef void (^AVAudioPlayerNodeCompletionHandler)(AVAudioPlayerNodeCompletionCallbackType callbackType) API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
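/* E.g. an illustrative sketch using `scheduleBuffer:completionCallbackType:completionHandler:`
   (declared below), assuming `playerNode` and `buffer` already exist:
   <pre>
   [playerNode scheduleBuffer:buffer
       completionCallbackType:AVAudioPlayerNodeCompletionDataPlayedBack
            completionHandler:^(AVAudioPlayerNodeCompletionCallbackType type) {
       // invoked once the data has actually played back on the device; schedule more
       // data here, but do not call -stop from this callback (see the class discussion)
   }];
   </pre>
*/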
/*!
@class AVAudioPlayerNode
@abstract Play buffers or segments of audio files.
@discussion
AVAudioPlayerNode supports scheduling the playback of `AVAudioBuffer` instances,
or segments of audio files opened via `AVAudioFile`. Buffers and segments may be
scheduled at specific points in time, or to play immediately following preceding segments.
FORMATS
Normally, you will want to configure the node's output format with the same number of
channels as are in the files and buffers to be played. Otherwise, channels will be dropped
or added as required. It is usually better to use an `AVAudioMixerNode` to
do this.
Similarly, when playing file segments, the node will sample rate convert if necessary, but
it is often preferable to configure the node's output sample rate to match that of the file(s)
and use a mixer to perform the rate conversion.
When playing buffers, there is an implicit assumption that the buffers are at the same
sample rate as the node's output format.
TIMELINES
The usual `AVAudioNode` sample times (as observed by `lastRenderTime`)
have an arbitrary zero point. AVAudioPlayerNode superimposes a second "player timeline" on
top of this, to reflect when the player was started, and intervals during which it was
paused. The methods `nodeTimeForPlayerTime:` and `playerTimeForNodeTime:`
convert between the two.
This class' `stop` method unschedules all previously scheduled buffers and
file segments, and returns the player timeline to sample time 0.
TIMESTAMPS
The "schedule" methods all take an `AVAudioTime` "when" parameter. This is
interpreted as follows:
1. nil:
- if there have been previous commands, the new one is played immediately following the
last one.
- otherwise, if the node is playing, the event is played in the very near future.
- otherwise, the command is played at sample time 0.
2. sample time:
- relative to the node's start time (which begins at 0 when the node is started).
3. host time:
- ignored unless the sample time is invalid when the engine is rendering to an audio
device.
- ignored in manual rendering mode.
ERRORS
The "schedule" methods can fail if:
1. a buffer's channel count does not match that of the node's output format.
2. a file can't be accessed.
3. an AVAudioTime specifies neither a valid sample time nor a valid host time.
4. a segment's start frame or frame count is negative.
BUFFER/FILE COMPLETION HANDLERS
The buffer or file completion handlers (see scheduling methods) are a means to schedule
more data if available on the player node. See `AVAudioPlayerNodeCompletionCallbackType`
for details on the different buffer/file completion callback types.
Note that a player should not be stopped from within a completion handler callback because
it can deadlock while trying to unschedule previously scheduled buffers.
OFFLINE RENDERING
When a player node is used with the engine operating in the manual rendering mode, the
buffer/file completion handlers, `lastRenderTime` and the latencies (`latency` and
`outputPresentationLatency`) can be used to track how much data the player has rendered and
how much more data is left to render.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioPlayerNode : AVAudioNode <AVAudioMixing>
- (instancetype)init NS_DESIGNATED_INITIALIZER;
/*! @method scheduleBuffer:completionHandler:
@abstract Schedule playing samples from an AVAudioBuffer.
@param buffer
the buffer to play
@param completionHandler
called after the buffer has been consumed by the player or the player is stopped. may be nil.
@discussion
Schedules the buffer to be played following any previously scheduled commands.
It is possible for the completionHandler to be called before rendering begins
or before the buffer is played completely.
*/
- (void)scheduleBuffer:(AVAudioPCMBuffer *)buffer completionHandler:(AVAudioNodeCompletionHandler __nullable)completionHandler;
/*! @method scheduleBuffer:completionCallbackType:completionHandler:
@abstract Schedule playing samples from an AVAudioBuffer.
@param buffer
the buffer to play
@param callbackType
option to specify when the completion handler must be called
@param completionHandler
called after the buffer has been consumed by the player or has finished playing back or
the player is stopped. may be nil.
@discussion
Schedules the buffer to be played following any previously scheduled commands.
*/
- (void)scheduleBuffer:(AVAudioPCMBuffer *)buffer completionCallbackType:(AVAudioPlayerNodeCompletionCallbackType)callbackType completionHandler:(AVAudioPlayerNodeCompletionHandler __nullable)completionHandler API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @method scheduleBuffer:atTime:options:completionHandler:
@abstract Schedule playing samples from an AVAudioBuffer.
@param buffer
the buffer to play
@param when
the time at which to play the buffer. see the discussion of timestamps, above.
@param options
options for looping, interrupting other buffers, etc.
@param completionHandler
called after the buffer has been consumed by the player or the player is stopped. may be nil.
@discussion
It is possible for the completionHandler to be called before rendering begins
or before the buffer is played completely.
*/
- (void)scheduleBuffer:(AVAudioPCMBuffer *)buffer atTime:(AVAudioTime * __nullable)when options:(AVAudioPlayerNodeBufferOptions)options completionHandler:(AVAudioNodeCompletionHandler __nullable)completionHandler;
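/* E.g. an illustrative sketch: loop a pre-filled buffer indefinitely until another buffer
   interrupts it or the player is stopped:
   <pre>
   [playerNode scheduleBuffer:loopBuffer
                       atTime:nil
                      options:AVAudioPlayerNodeBufferLoops
            completionHandler:nil];
   </pre>
*/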
/*! @method scheduleBuffer:atTime:options:completionCallbackType:completionHandler:
@abstract Schedule playing samples from an AVAudioBuffer.
@param buffer
the buffer to play
@param when
the time at which to play the buffer. see the discussion of timestamps, above.
@param options
options for looping, interrupting other buffers, etc.
@param callbackType
option to specify when the completion handler must be called
@param completionHandler
called after the buffer has been consumed by the player or has finished playing back or
the player is stopped. may be nil.
*/
- (void)scheduleBuffer:(AVAudioPCMBuffer *)buffer atTime:(AVAudioTime * __nullable)when options:(AVAudioPlayerNodeBufferOptions)options
completionCallbackType:(AVAudioPlayerNodeCompletionCallbackType)callbackType completionHandler:(AVAudioPlayerNodeCompletionHandler __nullable)completionHandler API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @method scheduleFile:atTime:completionHandler:
@abstract Schedule playing of an entire audio file.
@param file
the file to play
@param when
the time at which to play the file. see the discussion of timestamps, above.
@param completionHandler
called after the file has been consumed by the player or the player is stopped. may be nil.
@discussion
It is possible for the completionHandler to be called before rendering begins
or before the file is played completely.
*/
- (void)scheduleFile:(AVAudioFile *)file atTime:(AVAudioTime * __nullable)when completionHandler:(AVAudioNodeCompletionHandler __nullable)completionHandler;
/*! @method scheduleFile:atTime:completionCallbackType:completionHandler:
@abstract Schedule playing of an entire audio file.
@param file
the file to play
@param when
the time at which to play the file. see the discussion of timestamps, above.
@param callbackType
option to specify when the completion handler must be called
@param completionHandler
called after the file has been consumed by the player or has finished playing back or
the player is stopped. may be nil.
*/
- (void)scheduleFile:(AVAudioFile *)file atTime:(AVAudioTime * __nullable)when completionCallbackType:(AVAudioPlayerNodeCompletionCallbackType)callbackType completionHandler:(AVAudioPlayerNodeCompletionHandler __nullable)completionHandler API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @method scheduleSegment:startingFrame:frameCount:atTime:completionHandler:
@abstract Schedule playing a segment of an audio file.
@param file
the file to play
@param startFrame
the starting frame position in the stream
@param numberFrames
the number of frames to play
@param when
the time at which to play the region. see the discussion of timestamps, above.
@param completionHandler
called after the segment has been consumed by the player or the player is stopped. may be nil.
@discussion
It is possible for the completionHandler to be called before rendering begins
or before the segment is played completely.
*/
- (void)scheduleSegment:(AVAudioFile *)file startingFrame:(AVAudioFramePosition)startFrame frameCount:(AVAudioFrameCount)numberFrames atTime:(AVAudioTime * __nullable)when completionHandler:(AVAudioNodeCompletionHandler __nullable)completionHandler;
/*! @method scheduleSegment:startingFrame:frameCount:atTime:completionCallbackType:completionHandler:
@abstract Schedule playing a segment of an audio file.
@param file
the file to play
@param startFrame
the starting frame position in the stream
@param numberFrames
the number of frames to play
@param when
the time at which to play the region. see the discussion of timestamps, above.
@param callbackType
option to specify when the completion handler must be called
@param completionHandler
called after the segment has been consumed by the player or has finished playing back or
the player is stopped. may be nil.
*/
- (void)scheduleSegment:(AVAudioFile *)file startingFrame:(AVAudioFramePosition)startFrame frameCount:(AVAudioFrameCount)numberFrames atTime:(AVAudioTime * __nullable)when
completionCallbackType:(AVAudioPlayerNodeCompletionCallbackType)callbackType completionHandler:(AVAudioPlayerNodeCompletionHandler __nullable)completionHandler API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
/*! @method stop
@abstract Clear all of the node's previously scheduled events and stop playback.
@discussion
All of the node's previously scheduled events are cleared, including any that are in the
middle of playing. The node's sample time (and therefore the times to which new events are
to be scheduled) is reset to 0, and will not proceed until the node is started again (via
play or playAtTime).
Note that pausing or stopping all the players connected to an engine does not pause or stop
the engine or the underlying hardware. The engine must be explicitly paused or stopped for
the hardware to stop.
*/
- (void)stop;
/*! @method prepareWithFrameCount:
@abstract Prepares previously scheduled file regions or buffers for playback.
@param frameCount
The number of sample frames of data to be prepared before returning.
*/
- (void)prepareWithFrameCount:(AVAudioFrameCount)frameCount;
/*! @method play
@abstract Start or resume playback immediately.
@discussion
Equivalent to playAtTime:nil.
*/
- (void)play;
/*! @method playAtTime:
@abstract Start or resume playback at a specific time.
@param when
the node time at which to start or resume playback. nil signifies "now".
@discussion
This node is initially paused. Requests to play buffers or file segments are enqueued, and
any necessary decoding begins immediately. Playback does not begin, however, until the player
has started playing, via this method.
E.g. to start a player 0.5 seconds in the future:
<pre>
// start engine and player
NSError *nsErr = nil;
if ([_engine startAndReturnError:&nsErr]) {
const float kStartDelayTime = 0.5; // sec
AVAudioFormat *outputFormat = [_player outputFormatForBus:0];
AVAudioFramePosition startSampleTime = _player.lastRenderTime.sampleTime + kStartDelayTime * outputFormat.sampleRate;
AVAudioTime *startTime = [AVAudioTime timeWithSampleTime:startSampleTime atRate:outputFormat.sampleRate];
[_player playAtTime:startTime];
}
</pre>
*/
- (void)playAtTime:(AVAudioTime * __nullable)when;
/*! @method pause
@abstract Pause playback.
@discussion
The player's sample time does not advance while the node is paused.
Note that pausing or stopping all the players connected to an engine does not pause or stop
the engine or the underlying hardware. The engine must be explicitly paused or stopped for
the hardware to stop.
*/
- (void)pause;
/*! @method nodeTimeForPlayerTime:
@abstract
Convert from player time to node time.
@param playerTime
a time relative to the player's start time
@return
a node time
@discussion
This method and its inverse `playerTimeForNodeTime:` are discussed in the
introduction to this class.
If the player is not playing when this method is called, nil is returned.
*/
- (AVAudioTime * __nullable)nodeTimeForPlayerTime:(AVAudioTime *)playerTime;
/*! @method playerTimeForNodeTime:
@abstract
Convert from node time to player time.
@param nodeTime
a node time
@return
a time relative to the player's start time
@discussion
This method and its inverse `nodeTimeForPlayerTime:` are discussed in the
introduction to this class.
If the player is not playing when this method is called, nil is returned.
*/
- (AVAudioTime * __nullable)playerTimeForNodeTime:(AVAudioTime *)nodeTime;
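/* E.g. an illustrative sketch computing the player's current position in seconds;
   `lastRenderTime` is nil when the engine is not running, so both values are checked:
   <pre>
   AVAudioTime *nodeTime = playerNode.lastRenderTime;
   AVAudioTime *playerTime = nodeTime ? [playerNode playerTimeForNodeTime:nodeTime] : nil;
   if (playerTime) {
       NSTimeInterval seconds = playerTime.sampleTime / playerTime.sampleRate;
   }
   </pre>
*/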
/*! @property playing
@abstract Indicates whether or not the player is playing.
*/
@property(nonatomic, readonly, getter=isPlaying) BOOL playing;
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,121 +0,0 @@
/*
File: AVAudioRecorder.h
Framework: AVFoundation
Copyright 2008-2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVAudioFormat.h>
#import <Foundation/Foundation.h>
#import <AVFAudio/AVAudioSettings.h>
#if TARGET_OS_IPHONE
#import <AVFAudio/AVAudioSession.h>
#endif // #if TARGET_OS_IPHONE
#import <Availability.h>
NS_ASSUME_NONNULL_BEGIN
@protocol AVAudioRecorderDelegate;
@class NSURL, NSError;
API_AVAILABLE(macos(10.7), ios(3.0), watchos(4.0)) API_UNAVAILABLE(tvos)
@interface AVAudioRecorder : NSObject {
@private
void *_impl;
}
/* The file type to create can be set through the corresponding settings key. If not set, it will be inferred from the file extension. Will overwrite a file at the specified URL if a file exists. */
- (nullable instancetype)initWithURL:(NSURL *)url settings:(NSDictionary<NSString *, id> *)settings error:(NSError **)outError;
/* The file type to create can be set through the corresponding settings key. If not set, it will be inferred from the file extension. Will overwrite a file at the specified URL if a file exists. */
- (nullable instancetype)initWithURL:(NSURL *)url format:(AVAudioFormat *)format error:(NSError **)outError API_AVAILABLE(macos(10.12), ios(10.0), watchos(4.0)) API_UNAVAILABLE(tvos);
/* transport control */
/* methods that return BOOL return YES on success and NO on failure. */
- (BOOL)prepareToRecord; /* creates the file and gets ready to record. happens automatically on record. */
- (BOOL)record; /* start or resume recording to file. */
- (BOOL)recordAtTime:(NSTimeInterval)time NS_AVAILABLE_IOS(6_0); /* start recording at specified time in the future. time is an absolute time based on and greater than deviceCurrentTime. */
- (BOOL)recordForDuration:(NSTimeInterval) duration; /* record a file of a specified duration. the recorder will stop when it has recorded this length of audio */
- (BOOL)recordAtTime:(NSTimeInterval)time forDuration:(NSTimeInterval) duration NS_AVAILABLE_IOS(6_0); /* record a file of a specified duration starting at specified time. time is an absolute time based on and greater than deviceCurrentTime. */
- (void)pause; /* pause recording */
- (void)stop; /* stops recording. closes the file. */
- (BOOL)deleteRecording; /* delete the recorded file. recorder must be stopped. returns NO on failure. */
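/* E.g. an illustrative sketch (`fileURL` is assumed; the settings keys are declared in
   AVAudioSettings.h):
   <pre>
   NSDictionary<NSString *, id> *settings = @{ AVFormatIDKey: @(kAudioFormatMPEG4AAC),
                                               AVSampleRateKey: @44100.0,
                                               AVNumberOfChannelsKey: @1 };
   NSError *error = nil;
   AVAudioRecorder *recorder = [[AVAudioRecorder alloc] initWithURL:fileURL settings:settings error:&error];
   if ([recorder prepareToRecord])
       [recorder record];
   </pre>
*/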
/* properties */
@property(readonly, getter=isRecording) BOOL recording; /* is it recording or not? */
@property(readonly) NSURL *url; /* URL of the recorded file */
/* these settings are fully valid only when prepareToRecord has been called */
@property(readonly) NSDictionary<NSString *, id> *settings;
/* this object is fully valid only when prepareToRecord has been called */
@property(readonly) AVAudioFormat *format API_AVAILABLE(macos(10.12), ios(10.0), watchos(4.0)) API_UNAVAILABLE(tvos);
/* the delegate will be sent messages from the AVAudioRecorderDelegate protocol */
@property(assign, nullable) id<AVAudioRecorderDelegate> delegate;
/* get the current time of the recording - only valid while recording */
@property(readonly) NSTimeInterval currentTime;
/* get the device current time - always valid */
@property(readonly) NSTimeInterval deviceCurrentTime NS_AVAILABLE_IOS(6_0);
/* metering */
@property(getter=isMeteringEnabled) BOOL meteringEnabled; /* turns level metering on or off. default is off. */
- (void)updateMeters; /* call to refresh meter values */
- (float)peakPowerForChannel:(NSUInteger)channelNumber; /* returns peak power in decibels for a given channel */
- (float)averagePowerForChannel:(NSUInteger)channelNumber; /* returns average power in decibels for a given channel */
#if TARGET_OS_IPHONE
/* The channels property lets you assign the output to record specific channels as described by AVAudioSession's channels property */
/* This property is nil valued until set. */
/* The array must have the same number of channels as returned by the numberOfChannels property. */
@property(nonatomic, copy, nullable) NSArray<AVAudioSessionChannelDescription *> *channelAssignments NS_AVAILABLE(10_9, 7_0); /* Array of AVAudioSessionChannelDescription objects */
#endif
@end
/* A protocol for delegates of AVAudioRecorder */
API_AVAILABLE(macos(10.7), ios(3.0), watchos(4.0)) API_UNAVAILABLE(tvos)
@protocol AVAudioRecorderDelegate <NSObject>
@optional
/* audioRecorderDidFinishRecording:successfully: is called when a recording has been finished or stopped. This method is NOT called if the recorder is stopped due to an interruption. */
- (void)audioRecorderDidFinishRecording:(AVAudioRecorder *)recorder successfully:(BOOL)flag;
/* if an error occurs while encoding it will be reported to the delegate. */
- (void)audioRecorderEncodeErrorDidOccur:(AVAudioRecorder *)recorder error:(NSError * __nullable)error;
#if TARGET_OS_IPHONE
/* AVAudioRecorder INTERRUPTION NOTIFICATIONS ARE DEPRECATED - Use AVAudioSession instead. */
/* audioRecorderBeginInterruption: is called when the audio session has been interrupted while the recorder was recording. The recorded file will be closed. */
- (void)audioRecorderBeginInterruption:(AVAudioRecorder *)recorder NS_DEPRECATED_IOS(2_2, 8_0);
/* audioRecorderEndInterruption:withOptions: is called when the audio session interruption has ended and this recorder had been interrupted while recording. */
/* Currently the only flag is AVAudioSessionInterruptionFlags_ShouldResume. */
- (void)audioRecorderEndInterruption:(AVAudioRecorder *)recorder withOptions:(NSUInteger)flags NS_DEPRECATED_IOS(6_0, 8_0);
- (void)audioRecorderEndInterruption:(AVAudioRecorder *)recorder withFlags:(NSUInteger)flags NS_DEPRECATED_IOS(4_0, 6_0);
/* audioRecorderEndInterruption: is called when the preferred method, audioRecorderEndInterruption:withFlags:, is not implemented. */
- (void)audioRecorderEndInterruption:(AVAudioRecorder *)recorder NS_DEPRECATED_IOS(2_2, 6_0);
#endif // TARGET_OS_IPHONE
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,372 +0,0 @@
/*
File: AVAudioSequencer.h
Framework: AVFoundation
Copyright (c) 2015 Apple Inc. All Rights Reserved.
*/
#import <Foundation/Foundation.h>
#if __has_include(<CoreMIDI/MIDIServices.h>)
#import <CoreMIDI/MIDIServices.h>
#endif
NS_ASSUME_NONNULL_BEGIN
@class AVAudioUnit;
@class AVAudioTime;
@class AVAudioEngine;
@class AVMusicTrack;
@class AVMusicTrackEventIterator;
@class AVAudioSequencer;
/*! @typedef AVMusicTimeStamp
@abstract A fractional number of beats
@discussion
This is used for all sequencer timeline-related methods. The relationship between this
value and time in seconds is determined by the sequence's tempo.
*/
typedef Float64 AVMusicTimeStamp;
/*! @typedef AVMusicSequenceLoadOptions
@abstract Determines whether data on different MIDI channels is mapped to multiple tracks, or
if the tracks are preserved as-is.
@discussion
If AVMusicSequenceLoadSMF_ChannelsToTracks is set, the loaded MIDI Sequence will contain a
tempo track, one track for each MIDI channel that is found in the SMF, and one track for
SysEx and/or MetaEvents (this will be the last track in the sequence).
If AVMusicSequenceLoadSMF_ChannelsToTracks is not set, the loaded MIDI Sequence will
contain one track for each track that is found in the SMF, plus a tempo track (if not found
in the SMF).
*/
typedef NS_OPTIONS(NSUInteger, AVMusicSequenceLoadOptions) {
AVMusicSequenceLoadSMF_PreserveTracks = 0, // 0x00
AVMusicSequenceLoadSMF_ChannelsToTracks = (1UL << 0) // 0x01
} NS_AVAILABLE(10_11, 9_0);
/*! @typedef AVBeatRange
@abstract Used to describe a specific time range within an AVMusicTrack.
*/
typedef struct _AVBeatRange {
AVMusicTimeStamp start;
AVMusicTimeStamp length;
} AVBeatRange;
NS_INLINE AVBeatRange AVMakeBeatRange(AVMusicTimeStamp startBeat, AVMusicTimeStamp lengthInBeats) {
AVBeatRange r;
r.start = startBeat;
r.length = lengthInBeats;
return r;
}
/*! @class AVAudioSequencer
@abstract A collection of MIDI events organized into AVMusicTracks, plus a player to play back the events.
*/
NS_CLASS_AVAILABLE(10_11, 9_0) __WATCHOS_PROHIBITED
@interface AVAudioSequencer : NSObject {
@protected
void *_impl;
}
/*! @method init
@abstract
Initialize a new sequencer, which will not be connected to an audio engine.
@discussion
This is used to create a sequencer whose tracks will only send events to external MIDI endpoints.
*/
- (instancetype)init __TVOS_UNAVAILABLE;
/*! @method initWithAudioEngine:
@abstract
Initialize a new sequencer, handing it the audio engine.
*/
- (instancetype)initWithAudioEngine:(AVAudioEngine *)engine;
/*! @method loadFromURL:options:error:
@abstract Load the file referenced by the URL and add the events to the sequence
@param fileURL
@param options
determines how the file's contents are mapped to tracks inside the sequence
@param outError
*/
- (BOOL)loadFromURL:(NSURL *)fileURL options:(AVMusicSequenceLoadOptions)options error:(NSError **)outError;
/*! @method loadFromData:options:error:
@abstract Parse the data and add its events to the sequence
@param data
@param options
determines how the contents are mapped to tracks inside the sequence
@param outError
*/
- (BOOL)loadFromData:(NSData *)data options:(AVMusicSequenceLoadOptions)options error:(NSError **)outError;
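/* E.g. an illustrative sketch (`engine` is a running AVAudioEngine and `midiURL` points at a
   Standard MIDI File):
   <pre>
   AVAudioSequencer *sequencer = [[AVAudioSequencer alloc] initWithAudioEngine:engine];
   NSError *error = nil;
   if ([sequencer loadFromURL:midiURL options:AVMusicSequenceLoadSMF_ChannelsToTracks error:&error]) {
       [sequencer prepareToPlay];
       [sequencer startAndReturnError:&error];
   }
   </pre>
*/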
/*! @method writeToURL:SMPTEResolution:replaceExisting:error:
@abstract Create and write a MIDI file from the events in the sequence
@param fileURL
the path for the file to be created
@param resolution
the relationship between "tick" and quarter note for saving to a Standard MIDI File - pass in
zero to use default - this will be the value that is currently set on the tempo track
@param replace
if the file already exists, YES will cause it to be overwritten with the new data.
Otherwise the call will fail with a permission error.
@param outError
@discussion
Only MIDI events are written when writing to the MIDI file. MIDI files are normally beat
based, but can also have a SMPTE (or real-time rather than beat time) representation.
*/
- (BOOL)writeToURL:(NSURL *)fileURL SMPTEResolution:(NSInteger)resolution replaceExisting:(BOOL)replace error:(NSError **)outError;
/*! @method dataWithSMPTEResolution:error:
@abstract Return a data object containing the events from the sequence
@discussion
All details regarding the SMPTE resolution apply here as well.
The returned NSData lifetime is controlled by the client.
*/
- (NSData *)dataWithSMPTEResolution:(NSInteger)SMPTEResolution error:(NSError **)outError;
/*! @method secondsForBeats:
@abstract Get the time in seconds for the given beat position (timestamp) in the track
*/
- (NSTimeInterval)secondsForBeats:(AVMusicTimeStamp)beats;
/*! @method beatsForSeconds:
@abstract Get the beat position (timestamp) for the given time in the track
*/
- (AVMusicTimeStamp)beatsForSeconds:(NSTimeInterval)seconds;
/* properties */
/*! @property tracks
@abstract An NSArray containing all the tracks in the sequence
@discussion
Track indices count from 0, and do not include the tempo track.
*/
@property (nonatomic, readonly) NSArray<AVMusicTrack *> *tracks;
/*! @property tempoTrack
@abstract The tempo track
@discussion
Each sequence has a single tempo track. All tempo events are placed into this track, as
well as other appropriate events (for instance, the time signature from a MIDI file). The
tempo track can be edited and iterated upon as any other track. Non-tempo events in a tempo
track are ignored.
*/
@property (nonatomic, readonly) AVMusicTrack *tempoTrack;
/*! @property userInfo
@abstract A dictionary containing meta-data derived from a sequence
@discussion
The dictionary can contain one or more of the kAFInfoDictionary_* keys
specified in <AudioToolbox/AudioFile.h>
*/
@property (nonatomic, readonly) NSDictionary<NSString *, id> *userInfo;
@end
@interface AVAudioSequencer(AVAudioSequencer_Player)
/*! @property currentPositionInSeconds
@abstract The current playback position in seconds
@discussion
Setting this positions the sequencer's player to the specified time. This can be set while
the player is playing, in which case playback will resume at the new position.
*/
@property(nonatomic) NSTimeInterval currentPositionInSeconds;
/*! @property currentPositionInBeats
@abstract The current playback position in beats
@discussion
Setting this positions the sequencer's player to the specified beat. This can be set while
the player is playing, in which case playback will resume at the new position.
*/
@property(nonatomic) NSTimeInterval currentPositionInBeats;
/*! @property playing
@abstract Indicates whether or not the sequencer's player is playing
@discussion
Returns TRUE if the sequencer's player has been started and not stopped. It may have
"played" past the end of the events in the sequence, but it is still considered to be
playing (and its time value increasing) until it is explicitly stopped.
*/
@property(nonatomic, readonly, getter=isPlaying) BOOL playing;
/*! @property rate
@abstract The playback rate of the sequencer's player
@discussion
1.0 is normal playback rate. Rate must be > 0.0.
*/
@property (nonatomic) float rate;
/*! @method hostTimeForBeats:error:
@abstract Returns the host time that will be (or was) played at the specified beat.
@discussion
This call is only valid if the player is playing and will return 0 with an error if the
player is not playing or if the starting position of the player (its "starting beat") was
after the specified beat. The method uses the sequence's tempo map to translate a beat
time from the starting time and beat of the player.
*/
- (UInt64)hostTimeForBeats:(AVMusicTimeStamp)inBeats error:(NSError **)outError;
/*! @method beatsForHostTime:error:
@abstract Returns the beat that will be (or was) played at the specified host time.
@discussion
This call is only valid if the player is playing and will return 0 with an error if the
player is not playing or if the starting time of the player was after the specified host
time. The method uses the sequence's tempo map to retrieve a beat time from the starting
and specified host time.
*/
- (AVMusicTimeStamp)beatsForHostTime:(UInt64)inHostTime error:(NSError **)outError;
/*! @method prepareToPlay
@abstract Get ready to play the sequence by prerolling all events
@discussion
Happens automatically on play if it has not already been called, but may produce a delay in
startup.
*/
- (void)prepareToPlay;
/*! @method startAndReturnError:
@abstract Start the sequencer's player
@discussion
If the AVAudioSequencer has not been prerolled, it will pre-roll itself and then start.
*/
- (BOOL)startAndReturnError:(NSError **)outError;
/*! @method stop
@abstract Stop the sequencer's player
@discussion
Stopping the player leaves it in an un-prerolled state, but stores the playback position so
that a subsequent call to startAndReturnError will resume where it left off. This action
will not stop an associated audio engine.
*/
- (void)stop;
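/* Usage sketch: a minimal start/stop cycle for the sequencer's player, assuming a loaded
   sequencer `sequencer` whose engine is already running (illustrative names only).

       [sequencer prepareToPlay];                 // optional; avoids a delay at start
       NSError *error = nil;
       if (![sequencer startAndReturnError:&error]) {
           NSLog(@"Could not start playback: %@", error);
       }
       // ... later ...
       [sequencer stop];                          // position is stored for a later resume
*/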
@end
/*! @class AVMusicTrack
@abstract A collection of music events which will be sent to a given destination, and which can be
offset, muted, etc. independently of events in other tracks.
*/
NS_CLASS_AVAILABLE(10_11, 9_0) __WATCHOS_PROHIBITED
@interface AVMusicTrack : NSObject {
@protected
void *_impl;
}
/* properties */
/*! @property destinationAudioUnit
@abstract The AVAudioUnit which will receive the track's events
@discussion
This is mutually exclusive with setting a destination MIDIEndpoint. The AU must already be
attached to an audio engine, and the track must be part of the AVAudioSequencer associated
with that engine. When playing, the track will send its events to that AVAudioUnit. The
destination AU cannot be changed while the track's sequence is playing.
*/
@property (nonatomic, retain, nullable) AVAudioUnit *destinationAudioUnit;
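/* Usage sketch: routing a track to a sampler, assuming an engine `engine` already associated
   with the track's sequencer and the AVAudioUnitSampler class from elsewhere in AVFoundation
   (an illustrative setup, not the only valid one).

       AVAudioUnitSampler *sampler = [[AVAudioUnitSampler alloc] init];
       [engine attachNode:sampler];                                  // AU must be attached first
       [engine connect:sampler to:engine.mainMixerNode format:nil];
       sequencer.tracks.firstObject.destinationAudioUnit = sampler;  // not while playing
*/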
/*! @property destinationMIDIEndpoint
@abstract Set the track's target to the specified MIDI endpoint
@discussion
This is mutually exclusive with setting a destination audio unit. Setting this will remove
the track's reference to an AVAudioUnit destination. When played, the track will send its
events to the MIDI Endpoint. See also MIDIDestinationCreate. The endpoint cannot be
changed while the track's sequence is playing.
*/
#if (TARGET_OS_MAC && !TARGET_OS_IPHONE) || TARGET_OS_IOS
@property (nonatomic) MIDIEndpointRef destinationMIDIEndpoint;
#endif
/*! @property loopRange
@abstract The timestamp range in beats for the loop
@discussion
The loop is set by specifying its beat range.
*/
@property (nonatomic) AVBeatRange loopRange;
/*! @property loopingEnabled
@abstract Determines whether or not the track is looped.
@discussion
If loopRange has not been set, the full track will be looped.
*/
@property (nonatomic,getter=isLoopingEnabled) BOOL loopingEnabled;
typedef NS_ENUM(NSInteger, AVMusicTrackLoopCount) {
AVMusicTrackLoopCountForever = -1
} NS_ENUM_AVAILABLE(10_10, 8_0);
/*! @property numberOfLoops
@abstract The number of times that the track's loop will repeat
@discussion
If set to AVMusicTrackLoopCountForever, the track will loop forever.
Otherwise, legal values start with 1.
*/
@property (nonatomic) NSInteger numberOfLoops;
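/* Usage sketch: looping the first eight beats of a track four times, assuming the
   AVMakeBeatRange helper from AVAudioTypes.h (illustrative values only).

       AVMusicTrack *track = sequencer.tracks.firstObject;
       track.loopRange = AVMakeBeatRange(0.0, 8.0);   // start beat, length in beats
       track.loopingEnabled = YES;
       track.numberOfLoops = 4;                       // or AVMusicTrackLoopCountForever
*/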
/*! @property offsetTime
@abstract Offset the track's start time to the specified time in beats
@discussion
By default this value is zero.
*/
@property (nonatomic) AVMusicTimeStamp offsetTime;
/*! @property muted
@abstract Whether the track is muted
*/
@property (nonatomic,getter=isMuted) BOOL muted;
/*! @property soloed
@abstract Whether the track is soloed
*/
@property (nonatomic,getter=isSoloed) BOOL soloed;
/*! @property lengthInBeats
@abstract The total duration of the track in beats
@discussion
This will return the beat of the last event in the track plus any additional time that may
be needed for fading out of ending notes, or for rounding a loop point to a musical bar, etc. If this
has not been set by the user, the track length will always be adjusted to the end of the
last active event in a track and is adjusted dynamically as events are added or removed.
The property will return the maximum of the user-set track length and the calculated length.
*/
@property (nonatomic) AVMusicTimeStamp lengthInBeats;
/*! @property lengthInSeconds
@abstract The total duration of the track in seconds
@discussion
This will return the time of the last event in the track plus any additional time that may be
needed for fading out of ending notes, or for rounding a loop point to a musical bar, etc. If this
has not been set by the user, the track length will always be adjusted to the end of the
last active event in a track and is adjusted dynamically as events are added or removed.
The property will return the maximum of the user-set track length and the calculated length.
*/
@property (nonatomic) NSTimeInterval lengthInSeconds;
/*! @property timeResolution
@abstract The time resolution value for the sequence, in ticks (pulses) per quarter note (PPQN)
@discussion
If a MIDI file was used to construct the containing sequence, the resolution will be what
was in the file. If you want to keep a time resolution when writing a new file, you can
retrieve this value and then specify it when calling -[AVAudioSequencer
writeToURL:SMPTEResolution:replaceExisting:error:]. It has no direct bearing on the rendering or notion of
time of the sequence itself, just its representation in MIDI files. By default this is set
to either 480 if the sequence was created manually, or a value based on what was in a MIDI
file if the sequence was created from a MIDI file.
This can only be retrieved from the tempo track.
*/
@property (nonatomic, readonly) NSUInteger timeResolution;
@end
NS_ASSUME_NONNULL_END

View file

@ -1,955 +0,0 @@
/*
File: AVAudioSession.h
Framework: AVFoundation
Copyright 2009-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/NSObject.h>
#import <Foundation/NSArray.h>
#import <Foundation/NSDate.h> /* for NSTimeInterval */
#import <AvailabilityMacros.h>
#import <CoreAudio/CoreAudioTypes.h>
NS_ASSUME_NONNULL_BEGIN
/* This protocol is available with iPhone 3.0 or later */
@protocol AVAudioSessionDelegate;
@class NSError, NSString, NSNumber;
@class AVAudioSessionChannelDescription, AVAudioSessionPortDescription, AVAudioSessionRouteDescription, AVAudioSessionDataSourceDescription;
/*
Notes on terminology used in this API.
Some of the property names and class names in AVAudioSession differ from
the names used in the 'C' language Audio Session API. In this API, an audio
"route" is made up of zero or more input "ports" and zero or more ouput "ports".
If the current audio category does not support inputs, the route will consist purely of
outputs. Conversely, if the category does not support output, the route will
consist purely of inputs. Categories that support simultaneous input and output
will have both inputs and outputs in the route.
A "port" refers to a single input or output within an audio route. Examples of
ports include built-in speaker, wired microphone, or Bluetooth A2DP output.
*/
#pragma mark -- enumerations --
/* For use with AVAudioSessionInterruptionNotification */
typedef NS_OPTIONS(NSUInteger, AVAudioSessionInterruptionOptions)
{
AVAudioSessionInterruptionOptionShouldResume = 1
} NS_AVAILABLE_IOS(6_0);
/* options for use when calling setActive:withOptions:error:
AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation --
Notify an interrupted app that the interruption has ended and it may resume playback. Only valid on
session deactivation. */
typedef NS_OPTIONS(NSUInteger, AVAudioSessionSetActiveOptions)
{
AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation = 1
} NS_AVAILABLE_IOS(6_0);
/* values to use for setting overrideOutputAudioPort property
AVAudioSessionPortOverrideNone --
No override. Return audio routing to the default state for the current audio category.
AVAudioSessionPortOverrideSpeaker --
Route audio output to speaker. Use this override with AVAudioSessionCategoryPlayAndRecord, which by
default routes the output to the receiver. */
typedef NS_ENUM(NSUInteger, AVAudioSessionPortOverride)
{
AVAudioSessionPortOverrideNone = 0,
AVAudioSessionPortOverrideSpeaker __TVOS_PROHIBITED __WATCHOS_PROHIBITED = 'spkr'
} NS_AVAILABLE_IOS(6_0);
/* values for AVAudioSessionRouteChangeReasonKey in AVAudioSessionRouteChangeNotification userInfo dictionary
AVAudioSessionRouteChangeReasonUnknown
The reason is unknown.
AVAudioSessionRouteChangeReasonNewDeviceAvailable
A new device became available (e.g. headphones have been plugged in).
AVAudioSessionRouteChangeReasonOldDeviceUnavailable
The old device became unavailable (e.g. headphones have been unplugged).
AVAudioSessionRouteChangeReasonCategoryChange
The audio category has changed (e.g. AVAudioSessionCategoryPlayback has been changed to AVAudioSessionCategoryPlayAndRecord).
AVAudioSessionRouteChangeReasonOverride
The route has been overridden (e.g. category is AVAudioSessionCategoryPlayAndRecord and the output
has been changed from the receiver, which is the default, to the speaker).
AVAudioSessionRouteChangeReasonWakeFromSleep
The device woke from sleep.
AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory
Returned when there is no route for the current category (for instance, the category is AVAudioSessionCategoryRecord
but no input device is available).
AVAudioSessionRouteChangeReasonRouteConfigurationChange
Indicates that the set of input and/or output ports has not changed, but some aspect of their
configuration has changed. For example, a port's selected data source has changed.
*/
typedef NS_ENUM(NSUInteger, AVAudioSessionRouteChangeReason)
{
AVAudioSessionRouteChangeReasonUnknown = 0,
AVAudioSessionRouteChangeReasonNewDeviceAvailable = 1,
AVAudioSessionRouteChangeReasonOldDeviceUnavailable = 2,
AVAudioSessionRouteChangeReasonCategoryChange = 3,
AVAudioSessionRouteChangeReasonOverride = 4,
AVAudioSessionRouteChangeReasonWakeFromSleep = 6,
AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory = 7,
AVAudioSessionRouteChangeReasonRouteConfigurationChange NS_ENUM_AVAILABLE_IOS(7_0) = 8
} NS_AVAILABLE_IOS(6_0);
/* values for setCategory:withOptions:error:
AVAudioSessionCategoryOptionMixWithOthers --
This allows an application to set whether or not other active audio apps will be interrupted
or mixed with your app's audio when your app's audio session goes active. The typical cases are:
(1) AVAudioSessionCategoryPlayAndRecord or AVAudioSessionCategoryMultiRoute
this will default to false, but can be set to true. This would allow other applications to play in the background
while an app had both audio input and output enabled
(2) AVAudioSessionCategoryPlayback
this will default to false, but can be set to true. This would allow other applications to play in the background,
but an app will still be able to play regardless of the setting of the ringer switch
(3) Other categories
this defaults to false and cannot be changed (that is, the mix with others setting of these categories
cannot be overridden). An application must be prepared for setting this property to fail as behaviour
may change in future releases. If an application changes its category, it should reassert the
option (it is not sticky across category changes).
AVAudioSessionCategoryOptionDuckOthers --
This allows an application to set whether or not other active audio apps will be ducked when your app's audio
session goes active. An example of this is the Nike app, which provides periodic updates to its user (it reduces the
volume of any music currently being played while it provides its status). This defaults to off. Note that the other
audio will be ducked for as long as the current session is active. You will need to deactivate your audio
session when you want full volume playback of the other audio.
If your category is AVAudioSessionCategoryPlayback, AVAudioSessionCategoryPlayAndRecord, or
AVAudioSessionCategoryMultiRoute, by default the audio session will be non-mixable and non-ducking.
Setting this option will also make your category mixable with others (AVAudioSessionCategoryOptionMixWithOthers
will be set).
AVAudioSessionCategoryOptionAllowBluetooth --
This allows an application to change the default behaviour of some audio session categories with regards to showing
bluetooth Hands-Free Profile (HFP) devices as available routes. The current category behavior is:
(1) AVAudioSessionCategoryPlayAndRecord
this will default to false, but can be set to true. This will allow a paired bluetooth HFP device to show up as
an available route for input, while playing through the category-appropriate output
(2) AVAudioSessionCategoryRecord
this will default to false, but can be set to true. This will allow a paired bluetooth HFP device to show up
as an available route for input
(3) Other categories
this defaults to false and cannot be changed (that is, enabling bluetooth for input in these categories is
not allowed)
An application must be prepared for setting this option to fail as behaviour may change in future releases.
If an application changes its category or mode, it should reassert the override (it is not sticky
across category and mode changes).
AVAudioSessionCategoryOptionDefaultToSpeaker --
This allows an application to change the default behaviour of some audio session categories with regards to
the audio route. The current category behavior is:
(1) AVAudioSessionCategoryPlayAndRecord category
this will default to false, but can be set to true. This will route to Speaker (instead of Receiver)
when no other audio route is connected.
(2) Other categories
this defaults to false and cannot be changed (that is, the default to speaker setting of these
categories cannot be overridden).
An application must be prepared for setting this property to fail as behaviour may change in future releases.
If an application changes its category, it should reassert the override (it is not sticky across
category and mode changes).
AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers --
If another app's audio session mode is set to AVAudioSessionModeSpokenAudio (podcast playback in the background for example),
then that other app's audio will be interrupted when the current application's audio session goes active. An example of this
is a navigation app that provides navigation prompts to its user (it pauses any spoken audio currently being played while it
plays the prompt). This defaults to off. Note that the other app's audio will be paused for as long as the current session is
active. You will need to deactivate your audio session to allow the other audio to resume playback.
Setting this option will also make your category mixable with others (AVAudioSessionCategoryOptionMixWithOthers
will be set). If you want other non-spoken audio apps to duck their audio when your app's session goes active, also set
AVAudioSessionCategoryOptionDuckOthers.
AVAudioSessionCategoryOptionAllowBluetoothA2DP --
This allows an application to change the default behaviour of some audio session categories with regards to showing
bluetooth Advanced Audio Distribution Profile (A2DP) devices, i.e. stereo Bluetooth devices, as available routes. The current
category behavior is:
(1) AVAudioSessionCategoryPlayAndRecord
this will default to false, but can be set to true. This will allow a paired bluetooth A2DP device to show up as
an available route for output, while recording through the category-appropriate input
(2) AVAudioSessionCategoryMultiRoute and AVAudioSessionCategoryRecord
this will default to false, and cannot be set to true.
(3) Other categories
this defaults to true and cannot be changed (that is, bluetooth A2DP ports are always supported in output-only categories).
An application must be prepared for setting this option to fail as behaviour may change in future releases.
If an application changes its category or mode, it should reassert the override (it is not sticky
across category and mode changes).
Setting both AVAudioSessionCategoryOptionAllowBluetooth and AVAudioSessionCategoryOptionAllowBluetoothA2DP is allowed. In cases
where a single Bluetooth device supports both HFP and A2DP, the HFP ports will be given a higher priority for routing. For HFP
and A2DP ports on separate hardware devices, the last-in wins rule applies.
AVAudioSessionCategoryOptionAllowAirPlay --
This allows an application to change the default behaviour of some audio session categories with regards to showing
AirPlay devices as available routes. See the documentation of AVAudioSessionCategoryOptionAllowBluetoothA2DP for details on
how this option applies to specific categories.
*/
typedef NS_OPTIONS(NSUInteger, AVAudioSessionCategoryOptions)
{
/* MixWithOthers is only valid with AVAudioSessionCategoryPlayAndRecord, AVAudioSessionCategoryPlayback, and AVAudioSessionCategoryMultiRoute */
AVAudioSessionCategoryOptionMixWithOthers = 0x1,
/* DuckOthers is only valid with AVAudioSessionCategoryAmbient, AVAudioSessionCategoryPlayAndRecord, AVAudioSessionCategoryPlayback, and AVAudioSessionCategoryMultiRoute */
AVAudioSessionCategoryOptionDuckOthers = 0x2,
/* AllowBluetooth is only valid with AVAudioSessionCategoryRecord and AVAudioSessionCategoryPlayAndRecord */
AVAudioSessionCategoryOptionAllowBluetooth __TVOS_PROHIBITED __WATCHOS_PROHIBITED = 0x4,
/* DefaultToSpeaker is only valid with AVAudioSessionCategoryPlayAndRecord */
AVAudioSessionCategoryOptionDefaultToSpeaker __TVOS_PROHIBITED __WATCHOS_PROHIBITED = 0x8,
/* InterruptSpokenAudioAndMixWithOthers is only valid with AVAudioSessionCategoryPlayAndRecord, AVAudioSessionCategoryPlayback, and AVAudioSessionCategoryMultiRoute */
AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers NS_AVAILABLE_IOS(9_0) = 0x11,
/* AllowBluetoothA2DP is only valid with AVAudioSessionCategoryPlayAndRecord */
AVAudioSessionCategoryOptionAllowBluetoothA2DP API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0)) = 0x20,
/* AllowAirPlay is only valid with AVAudioSessionCategoryPlayAndRecord */
AVAudioSessionCategoryOptionAllowAirPlay API_AVAILABLE(ios(10.0), tvos(10.0)) __WATCHOS_PROHIBITED = 0x40,
} NS_AVAILABLE_IOS(6_0);
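/* Usage sketch: requesting mixable playback with the options above, assuming the app simply
   wants to coexist with other audio (a sketch, not the only valid combination).

       NSError *error = nil;
       AVAudioSession *session = [AVAudioSession sharedInstance];
       if (![session setCategory:AVAudioSessionCategoryPlayback
                     withOptions:AVAudioSessionCategoryOptionMixWithOthers
                           error:&error]) {
           NSLog(@"setCategory failed: %@", error);   // be prepared for failure
       }
*/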
typedef NS_ENUM(NSUInteger, AVAudioSessionInterruptionType)
{
AVAudioSessionInterruptionTypeBegan = 1, /* the system has interrupted your audio session */
AVAudioSessionInterruptionTypeEnded = 0, /* the interruption has ended */
} NS_AVAILABLE_IOS(6_0);
/* Used in AVAudioSessionSilenceSecondaryAudioHintNotification to indicate whether optional secondary audio muting should begin or end */
typedef NS_ENUM(NSUInteger, AVAudioSessionSilenceSecondaryAudioHintType)
{
AVAudioSessionSilenceSecondaryAudioHintTypeBegin = 1, /* the system is indicating that another application's primary audio has started */
AVAudioSessionSilenceSecondaryAudioHintTypeEnd = 0, /* the system is indicating that another application's primary audio has stopped */
} NS_AVAILABLE_IOS(8_0);
/*!
@enum AVAudioSessionRecordPermission values
@abstract These are the values returned by recordPermission.
@constant AVAudioSessionRecordPermissionUndetermined
The user has not yet been asked for permission.
@constant AVAudioSessionRecordPermissionDenied
The user has been asked and has denied permission.
@constant AVAudioSessionRecordPermissionGranted
The user has been asked and has granted permission.
*/
typedef NS_ENUM(NSUInteger, AVAudioSessionRecordPermission)
{
AVAudioSessionRecordPermissionUndetermined = 'undt',
AVAudioSessionRecordPermissionDenied = 'deny',
AVAudioSessionRecordPermissionGranted = 'grnt'
} __TVOS_PROHIBITED API_AVAILABLE(ios(8.0), watchos(4.0));
/*
@enum AVAudioSessionIOType values
@abstract Values to be used by setAggregatedIOPreference:error: method.
@discussion Starting in iOS 10, applications that use AVCaptureSession on iPads and iPhones that
support taking Live Photos will have non-aggregated audio I/O unless the app opts out by
setting its AVAudioSessionIOType to Aggregated. Non-aggregated audio I/O means that separate
threads will be used to service audio I/O for input and output directions.
Note that in cases where the I/O is not aggregated, the sample rate and IO buffer duration
properties will map to the output audio device. In this scenario, the input and
output audio hardware may be running at different sample rates and with different IO buffer
durations. If your app requires input and output audio to be presented in the same realtime
I/O callback, or requires that input and output audio have the same sample rate or IO buffer
duration, or if your app requires the ability to set a preferred sample rate or IO buffer duration
for audio input, set the AVAudioSessionIOType to Aggregated.
Apps that don't use AVCaptureSession and use AVAudioSessionCategoryPlayAndRecord will continue
to have aggregated audio I/O, as in previous versions of iOS.
@constant AVAudioSessionIOTypeNotSpecified
The default value. If your app does not use AVCaptureSession or does not have any specific
requirement for aggregating input and output audio in the same realtime I/O callback, use this
value. Note that if your app does not use AVCaptureSession, it will get aggregated I/O when using
AVAudioSessionCategoryPlayAndRecord.
If your app does utilize AVCaptureSession, use of this value will allow AVCaptureSession to
start recording without glitching already running output audio and will allow the system to
utilize power-saving optimizations.
@constant AVAudioSessionIOTypeAggregated
Use this value if your session uses AVAudioSessionCategoryPlayAndRecord and requires input and
output audio to be presented in the same realtime I/O callback. For example, if your app will be using
a RemoteIO with both input and output enabled.
Note that your session's preference to use aggregated IO will not be honored if it specifies
AVAudioSessionCategoryOptionMixWithOthers AND another app's audio session was already active
with non-mixable, non-aggregated input/output.
*/
typedef NS_ENUM(NSUInteger, AVAudioSessionIOType)
{
AVAudioSessionIOTypeNotSpecified = 0,
AVAudioSessionIOTypeAggregated = 1
} API_AVAILABLE(ios(10.0)) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
/*!
@enum AVAudioSessionRouteSharingPolicy
@constant AVAudioSessionRouteSharingPolicyDefault
Follow normal rules for routing audio output.
@constant AVAudioSessionRouteSharingPolicyLongForm
Route output to the shared long-form audio output. A session whose primary use case is as a
music or podcast player may use this value to play to the same output as the built-in Music (iOS),
Podcasts, or iTunes (macOS) applications. Typically applications that use this policy will also
want to sign up for remote control events as documented in the Event Handling Guide for UIKit Apps
and will want to utilize the MediaPlayer framework's MPNowPlayingInfoCenter class. All applications
on the system that use the long-form route sharing policy will have their audio routed to the
same location.
@constant AVAudioSessionRouteSharingPolicyIndependent
Applications should not attempt to set this value directly. On iOS, this value will be set by
the system in cases where route picker UI is used to direct video to a wireless route.
*/
typedef NS_ENUM(NSUInteger, AVAudioSessionRouteSharingPolicy)
{
AVAudioSessionRouteSharingPolicyDefault = 0,
AVAudioSessionRouteSharingPolicyLongForm = 1,
AVAudioSessionRouteSharingPolicyIndependent = 2,
} API_AVAILABLE(ios(11.0), tvos(11.0), macos(10.13)) __WATCHOS_PROHIBITED;
/*!
@enum AVAudioSession error codes
@abstract These are the error codes returned from the AVAudioSession API.
@constant AVAudioSessionErrorCodeNone
Operation succeeded.
@constant AVAudioSessionErrorCodeMediaServicesFailed
The app attempted to use the audio session during or after a Media Services failure. The app should
wait for an AVAudioSessionMediaServicesWereResetNotification and then rebuild all its state.
@constant AVAudioSessionErrorCodeIsBusy
The app attempted to set its audio session inactive or change its AVAudioSessionIOType, but it is still actively playing and/or recording.
@constant AVAudioSessionErrorCodeIncompatibleCategory
The app tried to perform an operation on a session but its category does not support it.
For instance, if the app calls setPreferredInputNumberOfChannels: while in a playback-only category.
@constant AVAudioSessionErrorCodeCannotInterruptOthers
The app's audio session is non-mixable and trying to go active while in the background.
This is allowed only when the app is the NowPlaying app.
@constant AVAudioSessionErrorCodeMissingEntitlement
The app does not have the required entitlements to perform an operation.
@constant AVAudioSessionErrorCodeSiriIsRecording
The app tried to do something with the audio session that is not allowed while Siri is recording.
@constant AVAudioSessionErrorCodeCannotStartPlaying
The app is not allowed to start recording and/or playing, usually because of a lack of an audio key in
its Info.plist. This could also happen if the app has this key but uses a category that can't record
and/or play in the background (AVAudioSessionCategoryAmbient, AVAudioSessionCategorySoloAmbient, etc.).
@constant AVAudioSessionErrorCodeCannotStartRecording
The app is not allowed to start recording, usually because it is starting a mixable recording from the
background and is not an Inter-App Audio app.
@constant AVAudioSessionErrorCodeBadParam
An illegal value was used for a property.
@constant AVAudioSessionErrorInsufficientPriority
The app was not allowed to set the audio category because another app (Phone, etc.) is controlling it.
@constant AVAudioSessionErrorCodeResourceNotAvailable
The operation failed because the device does not have sufficient hardware resources to complete the action.
For example, the operation requires audio input hardware, but the device has no audio input available.
@constant AVAudioSessionErrorCodeUnspecified
An unspecified error has occurred.
*/
typedef NS_ENUM(NSInteger, AVAudioSessionErrorCode)
{
AVAudioSessionErrorCodeNone = 0,
AVAudioSessionErrorCodeMediaServicesFailed = 'msrv', /* 0x6D737276, 1836282486 */
AVAudioSessionErrorCodeIsBusy = '!act', /* 0x21616374, 560030580 */
AVAudioSessionErrorCodeIncompatibleCategory = '!cat', /* 0x21636174, 560161140 */
AVAudioSessionErrorCodeCannotInterruptOthers = '!int', /* 0x21696E74, 560557684 */
AVAudioSessionErrorCodeMissingEntitlement = 'ent?', /* 0x656E743F, 1701737535 */
AVAudioSessionErrorCodeSiriIsRecording = 'siri', /* 0x73697269, 1936290409 */
AVAudioSessionErrorCodeCannotStartPlaying = '!pla', /* 0x21706C61, 561015905 */
AVAudioSessionErrorCodeCannotStartRecording = '!rec', /* 0x21726563, 561145187 */
AVAudioSessionErrorCodeBadParam = -50,
AVAudioSessionErrorInsufficientPriority = '!pri', /* 0x21707269, 561017449 */
AVAudioSessionErrorCodeResourceNotAvailable = '!res', /* 0x21726573, 561145203 */
AVAudioSessionErrorCodeUnspecified = 'what' /* 0x77686174, 2003329396 */
} API_AVAILABLE(ios(7.0), macos(10.13), watchos(2.0), tvos(7.0));
#if TARGET_OS_OSX
#pragma mark -- macOS AVAudioSession interface --
API_AVAILABLE(ios(3.0), macos(10.13), watchos(2.0), tvos(3.0))
@interface AVAudioSession : NSObject {
@private
void *_impl;
}
/* returns singleton instance */
+ (AVAudioSession *)sharedInstance;
/* AVAudioSession is a singleton. Use +sharedInstance instead of -init */
- (instancetype)init NS_UNAVAILABLE;
/* Get the route sharing policy. See AVAudioSessionRouteSharingPolicy for a description of the available policies. */
@property (readonly) AVAudioSessionRouteSharingPolicy routeSharingPolicy API_AVAILABLE(macos(10.13));
/* Set the route sharing policy. See AVAudioSessionRouteSharingPolicy for a description of the available policies. */
- (BOOL)setRouteSharingPolicy:(AVAudioSessionRouteSharingPolicy)inPolicy error:(NSError **)outError API_AVAILABLE(macos(10.13));
@end // @interface AVAudioSession
#else
#pragma mark -- iOS/tvOS/watchOS AVAudioSession interface --
API_AVAILABLE(ios(3.0), macos(10.13), watchos(2.0), tvos(3.0))
@interface AVAudioSession : NSObject {
@private
void *_impl;
}
/* returns singleton instance */
+ (AVAudioSession *)sharedInstance;
/* Set the session active or inactive. Note that activating an audio session is a synchronous (blocking) operation.
Therefore, we recommend that applications not activate their session from a thread where a long blocking operation will be problematic.
Note that this method will throw an exception in apps linked on or after iOS 8 if the session is set inactive while it has running or
paused I/O (e.g. audio queues, players, recorders, converters, remote I/Os, etc.).
*/
- (BOOL)setActive:(BOOL)active error:(NSError **)outError;
- (BOOL)setActive:(BOOL)active withOptions:(AVAudioSessionSetActiveOptions)options error:(NSError **)outError NS_AVAILABLE_IOS(6_0);
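/* Usage sketch: deactivating the session and letting interrupted apps resume, a common
   pattern when playback finishes (a sketch; error handling kept minimal).

       NSError *error = nil;
       if (![[AVAudioSession sharedInstance] setActive:NO
             withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                   error:&error]) {
           NSLog(@"Deactivation failed: %@", error);  // e.g. AVAudioSessionErrorCodeIsBusy
       }
*/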
// Get the list of categories available on the device. Certain categories may be unavailable on particular devices. For example,
// AVAudioSessionCategoryRecord will not be available on devices that have no support for audio input.
@property (readonly) NSArray<NSString *> *availableCategories NS_AVAILABLE_IOS(9_0);
/* set session category */
- (BOOL)setCategory:(NSString *)category error:(NSError **)outError;
/* set session category with options */
- (BOOL)setCategory:(NSString *)category withOptions:(AVAudioSessionCategoryOptions)options error:(NSError **)outError NS_AVAILABLE_IOS(6_0);
/* set session category and mode with options */
- (BOOL)setCategory:(NSString *)category mode:(NSString *)mode options:(AVAudioSessionCategoryOptions)options error:(NSError **)outError API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0));
/* set session category, mode, routing sharing policy, and options
Use of the long-form route sharing policy is only valid in conjunction with a limited set of category, mode, and option values.
Allowed categories: AVAudioSessionCategoryPlayback
Allowed modes: AVAudioSessionModeDefault, AVAudioSessionModeMoviePlayback, AVAudioSessionModeSpokenAudio
Allowed options: None. Options are allowed when changing the routing policy back to Default, however. */
- (BOOL)setCategory:(NSString *)category mode:(NSString *)mode routeSharingPolicy:(AVAudioSessionRouteSharingPolicy)policy options:(AVAudioSessionCategoryOptions)options error:(NSError **)outError API_AVAILABLE(ios(11.0), tvos(11.0)) __WATCHOS_PROHIBITED;
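/* Usage sketch: opting a podcast-style player into the shared long-form route, using one of
   the allowed category/mode combinations listed above (a sketch only; options must be empty).

       NSError *error = nil;
       BOOL ok = [[AVAudioSession sharedInstance]
                     setCategory:AVAudioSessionCategoryPlayback
                            mode:AVAudioSessionModeSpokenAudio
              routeSharingPolicy:AVAudioSessionRouteSharingPolicyLongForm
                         options:0
                           error:&error];
       if (!ok) NSLog(@"Long-form policy rejected: %@", error);
*/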
/* get session category. Examples: AVAudioSessionCategoryRecord, AVAudioSessionCategoryPlayAndRecord, etc. */
@property (readonly) NSString *category;
/* get the current set of AVAudioSessionCategoryOptions */
@property (readonly) AVAudioSessionCategoryOptions categoryOptions NS_AVAILABLE_IOS(6_0);
/* Get the routing policy. See AVAudioSessionRouteSharingPolicy for a description of the available policies
See -setCategory:mode:routeSharingPolicy:options:error: method for additional discussion. */
@property (readonly) AVAudioSessionRouteSharingPolicy routeSharingPolicy API_AVAILABLE(ios(11.0), tvos(11.0)) __WATCHOS_PROHIBITED;
// Modes modify the audio category in order to introduce behavior that is tailored to the specific
// use of audio within an application. Examples: AVAudioSessionModeVideoRecording, AVAudioSessionModeVoiceChat,
// AVAudioSessionModeMeasurement, etc.
// Get the list of modes available on the device. Certain modes may be unavailable on particular devices. For example,
// AVAudioSessionModeVideoRecording will not be available on devices that have no support for recording video.
@property (readonly) NSArray<NSString *> *availableModes NS_AVAILABLE_IOS(9_0);
- (BOOL)setMode:(NSString *)mode error:(NSError **)outError NS_AVAILABLE_IOS(5_0); /* set session mode */
@property (readonly) NSString *mode NS_AVAILABLE_IOS(5_0); /* get session mode */
/* Returns an enum indicating whether the user has granted or denied permission to record, or has not been asked */
- (AVAudioSessionRecordPermission)recordPermission __TVOS_PROHIBITED API_AVAILABLE(ios(8.0), watchos(4.0));
/* Checks to see if calling process has permission to record audio. The 'response' block will be called
immediately if permission has already been granted or denied. Otherwise, it presents a dialog to notify
the user and allow them to choose, and calls the block once the UI has been dismissed. 'granted'
indicates whether permission has been granted. Note that the block may be called in a different thread context.
*/
typedef void (^PermissionBlock)(BOOL granted);
- (void)requestRecordPermission:(PermissionBlock)response __TVOS_PROHIBITED API_AVAILABLE(ios(7.0), watchos(4.0));
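/* Usage sketch: checking and, if needed, requesting record permission before configuring a
   recording category (a sketch; the block may run on a different thread).

       AVAudioSession *session = [AVAudioSession sharedInstance];
       if ([session recordPermission] == AVAudioSessionRecordPermissionUndetermined) {
           [session requestRecordPermission:^(BOOL granted) {
               if (granted) {
                   // safe to proceed with AVAudioSessionCategoryRecord, etc.
               }
           }];
       }
*/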
- (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride error:(NSError **)outError NS_AVAILABLE_IOS(6_0);
/* Will be true when another application is playing audio.
Note: As of iOS 8.0, Apple recommends that most applications use secondaryAudioShouldBeSilencedHint instead of this property.
The otherAudioPlaying property will be true if any other audio (including audio from an app using AVAudioSessionCategoryAmbient)
is playing, whereas the secondaryAudioShouldBeSilencedHint property is more restrictive in its consideration of whether
primary audio from another application is playing.
*/
@property (readonly, getter=isOtherAudioPlaying) BOOL otherAudioPlaying NS_AVAILABLE_IOS(6_0);
/* Will be true when another application with a non-mixable audio session is playing audio. Applications may use
this property as a hint to silence audio that is secondary to the functionality of the application. For example, a game app
using AVAudioSessionCategoryAmbient may use this property to decide to mute its soundtrack while leaving its sound effects unmuted.
Note: This property is closely related to AVAudioSessionSilenceSecondaryAudioHintNotification.
*/
@property (readonly) BOOL secondaryAudioShouldBeSilencedHint NS_AVAILABLE_IOS(8_0);
/* A description of the current route, consisting of zero or more input ports and zero or more output ports */
@property (readonly) AVAudioSessionRouteDescription *currentRoute NS_AVAILABLE_IOS(6_0);
/* Select a preferred input port for audio routing. If the input port is already part of the current audio route, this will have no effect.
Otherwise, selecting an input port for routing will initiate a route change to use the preferred input port, provided that the application's
session controls audio routing. Setting a nil value will clear the preference. */
- (BOOL)setPreferredInput:(nullable AVAudioSessionPortDescription *)inPort error:(NSError **)outError NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
@property (readonly, nullable) AVAudioSessionPortDescription *preferredInput NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED; /* Get the preferred input port. Will be nil if no preference has been set */
/* Get the set of input ports that are available for routing. Note that this property only applies to the session's current category and mode.
For example, if the session's current category is AVAudioSessionCategoryPlayback, there will be no available inputs. */
@property (readonly, nullable) NSArray<AVAudioSessionPortDescription *> *availableInputs NS_AVAILABLE_IOS(7_0);
@end
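/* Usage sketch: preferring the built-in microphone when it is available, assuming a category
   that supports input is already set (a sketch; availableInputs depends on category and mode,
   and AVAudioSessionPortBuiltInMic is declared later in this header).

       AVAudioSession *session = [AVAudioSession sharedInstance];
       for (AVAudioSessionPortDescription *input in session.availableInputs) {
           if ([input.portType isEqualToString:AVAudioSessionPortBuiltInMic]) {
               NSError *error = nil;
               [session setPreferredInput:input error:&error];  // nil clears the preference
               break;
           }
       }
*/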
/* AVAudioSessionHardwareConfiguration manages the set of properties that reflect the current state of
audio hardware in the current route. Applications whose functionality depends on these properties should
reevaluate them any time the route changes. */
@interface AVAudioSession (AVAudioSessionHardwareConfiguration)
/* Get and set preferred values for hardware properties. Note: that there are corresponding read-only
properties that describe the actual values for sample rate, I/O buffer duration, etc. */
/* The preferred hardware sample rate for the session. The actual sample rate may be different. */
- (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED;
@property (readonly) double preferredSampleRate NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED;
/* The preferred hardware IO buffer duration in seconds. The actual IO buffer duration may be different. */
- (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration error:(NSError **)outError __WATCHOS_PROHIBITED;
@property (readonly) NSTimeInterval preferredIOBufferDuration __WATCHOS_PROHIBITED;
/* Sets the number of input channels that the app would prefer for the current route */
- (BOOL)setPreferredInputNumberOfChannels:(NSInteger)count error:(NSError **)outError NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
@property (readonly) NSInteger preferredInputNumberOfChannels NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Sets the number of output channels that the app would prefer for the current route */
- (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count error:(NSError **)outError NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
@property (readonly) NSInteger preferredOutputNumberOfChannels NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Returns the largest number of audio input channels available for the current route */
@property (readonly) NSInteger maximumInputNumberOfChannels NS_AVAILABLE_IOS(7_0);
/* Returns the largest number of audio output channels available for the current route */
@property (readonly) NSInteger maximumOutputNumberOfChannels NS_AVAILABLE_IOS(7_0);
/* A value defined over the range [0.0, 1.0], with 0.0 corresponding to the lowest analog
gain setting and 1.0 corresponding to the highest analog gain setting. Attempting to set values
outside of the defined range will result in the value being "clamped" to a valid input. This is
a global input gain setting that applies to the current input source for the entire system.
When no applications are using the input gain control, the system will restore the default input
gain setting for the input source. Note that some audio accessories, such as USB devices, may
not have a default value. This property is only valid if inputGainSettable
is true. Note: inputGain is key-value observable */
- (BOOL)setInputGain:(float)gain error:(NSError **)outError NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED;
@property (readonly) float inputGain NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED; /* value in range [0.0, 1.0] */
/* True when audio input gain is available. Some input ports may not provide the ability to set the
input gain, so check this value before attempting to set input gain. */
@property (readonly, getter=isInputGainSettable) BOOL inputGainSettable NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED;
/* True if input hardware is available. */
@property (readonly, getter=isInputAvailable) BOOL inputAvailable NS_AVAILABLE_IOS(6_0);
/* DataSource methods are for use with routes that support input or output data source selection.
If the attached accessory supports data source selection, the data source properties/methods provide for discovery and
selection of input and/or output data sources. Note that the properties and methods for data source selection below are
equivalent to the properties and methods on AVAudioSessionPortDescription. The methods below only apply to the currently
routed ports. */
/* Key-value observable. */
@property (readonly, nullable) NSArray<AVAudioSessionDataSourceDescription *> *inputDataSources NS_AVAILABLE_IOS(6_0);
/* Get and set the currently selected data source. Will be nil if no data sources are available.
Setting a nil value will clear the data source preference. */
@property (readonly, nullable) AVAudioSessionDataSourceDescription *inputDataSource NS_AVAILABLE_IOS(6_0);
- (BOOL)setInputDataSource:(nullable AVAudioSessionDataSourceDescription *)dataSource error:(NSError **)outError NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED;
/* Key-value observable. */
@property (readonly, nullable) NSArray<AVAudioSessionDataSourceDescription *> *outputDataSources NS_AVAILABLE_IOS(6_0);
/* Get and set currently selected data source. Will be nil if no data sources are available.
Setting a nil value will clear the data source preference. */
@property (readonly, nullable) AVAudioSessionDataSourceDescription *outputDataSource NS_AVAILABLE_IOS(6_0);
- (BOOL)setOutputDataSource:(nullable AVAudioSessionDataSourceDescription *)dataSource error:(NSError **)outError NS_AVAILABLE_IOS(6_0) __WATCHOS_PROHIBITED;
/* Current values for hardware properties. Note that most of these properties have corresponding methods
for getting and setting preferred values. Input- and output-specific properties will generate an error if they are
queried when the audio session category does not support them. Each of these will return 0 (or 0.0) if there is an error. */
/* The current hardware sample rate */
@property (readonly) double sampleRate NS_AVAILABLE_IOS(6_0);
/* The current number of hardware input channels. Is key-value observable */
@property (readonly) NSInteger inputNumberOfChannels NS_AVAILABLE_IOS(6_0);
/* The current number of hardware output channels. Is key-value observable */
@property (readonly) NSInteger outputNumberOfChannels NS_AVAILABLE_IOS(6_0);
/* The current output volume. Is key-value observable */
@property (readonly) float outputVolume NS_AVAILABLE_IOS(6_0); /* value in range [0.0, 1.0] */
/* The current hardware input latency in seconds. */
@property (readonly) NSTimeInterval inputLatency NS_AVAILABLE_IOS(6_0);
/* The current hardware output latency in seconds. */
@property (readonly) NSTimeInterval outputLatency NS_AVAILABLE_IOS(6_0);
/* The current hardware IO buffer duration in seconds. */
@property (readonly) NSTimeInterval IOBufferDuration NS_AVAILABLE_IOS(6_0);
/* Set inIOType to AVAudioSessionIOTypeAggregated if your app uses AVAudioSessionCategoryPlayAndRecord
and requires input and output audio to be presented in the same realtime I/O callback. See the AVAudioSessionIOType
documentation for more details.
*/
- (BOOL)setAggregatedIOPreference:(AVAudioSessionIOType)inIOType error:(NSError **)outError API_AVAILABLE(ios(10.0)) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
@end
#pragma mark -- Names for NSNotifications --
/* Registered listeners will be notified when the system has interrupted the audio session and when
the interruption has ended. Check the notification's userInfo dictionary for the interruption type -- either begin or end.
In the case of an end interruption notification, check the userInfo dictionary for AVAudioSessionInterruptionOptions that
indicate whether audio playback should resume.
In cases where the interruption is a consequence of the application being suspended, the info dictionary will contain
AVAudioSessionInterruptionWasSuspendedKey, with the boolean value set to true.
*/
AVF_EXPORT NSString *const AVAudioSessionInterruptionNotification NS_AVAILABLE_IOS(6_0);
/* Registered listeners will be notified when a route change has occurred. Check the notification's userInfo dictionary for the
route change reason and for a description of the previous audio route.
*/
AVF_EXPORT NSString *const AVAudioSessionRouteChangeNotification NS_AVAILABLE_IOS(6_0);
/* Registered listeners will be notified if the media server is killed. In the event that the server is killed,
take appropriate steps to handle requests that come in before the server resets. See Technical Q&A QA1749.
*/
AVF_EXPORT NSString *const AVAudioSessionMediaServicesWereLostNotification NS_AVAILABLE_IOS(7_0);
/* Registered listeners will be notified when the media server restarts. In the event that the server restarts,
take appropriate steps to re-initialize any audio objects used by your application. See Technical Q&A QA1749.
*/
AVF_EXPORT NSString *const AVAudioSessionMediaServicesWereResetNotification NS_AVAILABLE_IOS(6_0);
/* Registered listeners that are currently in the foreground and have active audio sessions will be notified
when primary audio from other applications starts and stops. Check the notification's userInfo dictionary
for the notification type -- either begin or end.
Foreground applications may use this notification as a hint to enable or disable audio that is secondary
to the functionality of the application. For more information, see the related property secondaryAudioShouldBeSilencedHint.
*/
AVF_EXPORT NSString *const AVAudioSessionSilenceSecondaryAudioHintNotification NS_AVAILABLE_IOS(8_0);
#pragma mark -- Keys for NSNotification userInfo dictionaries --
/* keys for AVAudioSessionInterruptionNotification */
/* Value is an NSNumber representing an AVAudioSessionInterruptionType */
AVF_EXPORT NSString *const AVAudioSessionInterruptionTypeKey NS_AVAILABLE_IOS(6_0);
/* Only present for end interruption events. Value is of type AVAudioSessionInterruptionOptions.*/
AVF_EXPORT NSString *const AVAudioSessionInterruptionOptionKey NS_AVAILABLE_IOS(6_0);
/* Only present in begin interruption events, where the interruption is a direct result of the application being suspended
by the operating system. Value is a boolean NSNumber, where a true value indicates that the interruption is the result
of the application being suspended, rather than being interrupted by another audio session.
Starting in iOS 10, the system will deactivate the audio session of most apps in response to the app process
being suspended. When the app starts running again, it will receive the notification that its session has been deactivated
by the system. Note that the notification is necessarily delayed in time, due to the fact that the application was suspended
at the time the session was deactivated by the system and the notification can only be delivered once the app is running again. */
AVF_EXPORT NSString *const AVAudioSessionInterruptionWasSuspendedKey NS_AVAILABLE_IOS(10_3);
/* keys for AVAudioSessionRouteChangeNotification */
/* value is an NSNumber representing an AVAudioSessionRouteChangeReason */
AVF_EXPORT NSString *const AVAudioSessionRouteChangeReasonKey NS_AVAILABLE_IOS(6_0);
/* value is AVAudioSessionRouteDescription * */
AVF_EXPORT NSString *const AVAudioSessionRouteChangePreviousRouteKey NS_AVAILABLE_IOS(6_0);
/* keys for AVAudioSessionSilenceSecondaryAudioHintNotification */
/* value is an NSNumber representing an AVAudioSessionSilenceSecondaryAudioHintType */
AVF_EXPORT NSString *const AVAudioSessionSilenceSecondaryAudioHintTypeKey NS_AVAILABLE_IOS(8_0);
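/* Usage sketch: observing interruptions with the notification and keys above, and resuming
   only when the system says it is appropriate (a sketch; keep the returned observer token so
   it can be removed later).

       id token = [[NSNotificationCenter defaultCenter]
           addObserverForName:AVAudioSessionInterruptionNotification
                       object:[AVAudioSession sharedInstance]
                        queue:nil
                   usingBlock:^(NSNotification *note) {
           NSUInteger type = [note.userInfo[AVAudioSessionInterruptionTypeKey] unsignedIntegerValue];
           if (type == AVAudioSessionInterruptionTypeEnded) {
               NSUInteger opts = [note.userInfo[AVAudioSessionInterruptionOptionKey] unsignedIntegerValue];
               if (opts & AVAudioSessionInterruptionOptionShouldResume) {
                   // restart playback here
               }
           }
       }];
       (void)token;  // retain for later removeObserver:
*/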
#pragma mark -- Values for the category property --
/* Use this category for background sounds such as rain, car engine noise, etc.
Mixes with other music. */
AVF_EXPORT NSString *const AVAudioSessionCategoryAmbient;
/* Use this category for background sounds. Other music will stop playing. */
AVF_EXPORT NSString *const AVAudioSessionCategorySoloAmbient;
/* Use this category for music tracks.*/
AVF_EXPORT NSString *const AVAudioSessionCategoryPlayback;
/* Use this category when recording audio. */
AVF_EXPORT NSString *const AVAudioSessionCategoryRecord;
/* Use this category when recording and playing back audio. */
AVF_EXPORT NSString *const AVAudioSessionCategoryPlayAndRecord;
/* Use this category when using a hardware codec or signal processor while
not playing or recording audio. */
AVF_EXPORT NSString *const AVAudioSessionCategoryAudioProcessing NS_DEPRECATED_IOS(3_0, 10_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
/* Use this category to customize the usage of available audio accessories and built-in audio hardware.
For example, this category provides an application with the ability to use an available USB output
and headphone output simultaneously for separate, distinct streams of audio data. Use of
this category by an application requires a more detailed knowledge of, and interaction with,
the capabilities of the available audio routes. May be used for input, output, or both.
Note that not all output types and output combinations are eligible for multi-route. Input is limited
to the last-in input port. Eligible inputs consist of the following:
AVAudioSessionPortUSBAudio, AVAudioSessionPortHeadsetMic, and AVAudioSessionPortBuiltInMic.
Eligible outputs consist of the following:
AVAudioSessionPortUSBAudio, AVAudioSessionPortLineOut, AVAudioSessionPortHeadphones, AVAudioSessionPortHDMI,
and AVAudioSessionPortBuiltInSpeaker.
Note that AVAudioSessionPortBuiltInSpeaker is only allowed to be used when there are no other eligible
outputs connected. */
AVF_EXPORT NSString *const AVAudioSessionCategoryMultiRoute NS_AVAILABLE_IOS(6_0);
#pragma mark -- Values for the mode property --
/*!
@abstract Modes modify the audio category in order to introduce behavior that is tailored to the specific
use of audio within an application. Available in iOS 5.0 and greater.
*/
/* The default mode */
AVF_EXPORT NSString *const AVAudioSessionModeDefault NS_AVAILABLE_IOS(5_0);
/* Only valid with AVAudioSessionCategoryPlayAndRecord. Appropriate for Voice over IP
(VoIP) applications. Reduces the number of allowable audio routes to be only those
that are appropriate for VoIP applications and may engage appropriate system-supplied
signal processing. Has the side effect of setting AVAudioSessionCategoryOptionAllowBluetooth */
AVF_EXPORT NSString *const AVAudioSessionModeVoiceChat NS_AVAILABLE_IOS(5_0);
/* Set by Game Kit on behalf of an application that uses a GKVoiceChat object; valid
only with the AVAudioSessionCategoryPlayAndRecord category.
Do not set this mode directly. If you need similar behavior and are not using
a GKVoiceChat object, use AVAudioSessionModeVoiceChat instead. */
AVF_EXPORT NSString *const AVAudioSessionModeGameChat NS_AVAILABLE_IOS(5_0);
/* Only valid with AVAudioSessionCategoryPlayAndRecord or AVAudioSessionCategoryRecord.
Modifies the audio routing options and may engage appropriate system-supplied signal processing. */
AVF_EXPORT NSString *const AVAudioSessionModeVideoRecording NS_AVAILABLE_IOS(5_0);
/* Appropriate for applications that wish to minimize the effect of system-supplied signal
processing for input and/or output audio signals. */
AVF_EXPORT NSString *const AVAudioSessionModeMeasurement NS_AVAILABLE_IOS(5_0);
/* Engages appropriate output signal processing for movie playback scenarios. Currently
only applied during playback over built-in speaker. */
AVF_EXPORT NSString *const AVAudioSessionModeMoviePlayback NS_AVAILABLE_IOS(6_0);
/* Only valid with AVAudioSessionCategoryPlayAndRecord. Reduces the number of allowable audio
routes to be only those that are appropriate for video chat applications. May engage appropriate
system-supplied signal processing. Has the side effect of setting
AVAudioSessionCategoryOptionAllowBluetooth and AVAudioSessionCategoryOptionDefaultToSpeaker. */
AVF_EXPORT NSString *const AVAudioSessionModeVideoChat NS_AVAILABLE_IOS(7_0);
/* Appropriate for applications which play spoken audio and wish to be paused (via audio session interruption) rather than ducked
if another app (such as a navigation app) plays a spoken audio prompt. Examples of apps that would use this are podcast players and
audio books. For more information, see the related category option AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers. */
AVF_EXPORT NSString *const AVAudioSessionModeSpokenAudio NS_AVAILABLE_IOS(9_0);
#pragma mark -- constants for port types --
/* input port types */
AVF_EXPORT NSString *const AVAudioSessionPortLineIn NS_AVAILABLE_IOS(6_0); /* Line level input on a dock connector */
AVF_EXPORT NSString *const AVAudioSessionPortBuiltInMic NS_AVAILABLE_IOS(6_0); /* Built-in microphone on an iOS device */
AVF_EXPORT NSString *const AVAudioSessionPortHeadsetMic NS_AVAILABLE_IOS(6_0); /* Microphone on a wired headset. Headset refers to an
accessory that has headphone outputs paired with a
microphone. */
/* output port types */
AVF_EXPORT NSString *const AVAudioSessionPortLineOut NS_AVAILABLE_IOS(6_0); /* Line level output on a dock connector */
AVF_EXPORT NSString *const AVAudioSessionPortHeadphones NS_AVAILABLE_IOS(6_0); /* Headphone or headset output */
AVF_EXPORT NSString *const AVAudioSessionPortBluetoothA2DP NS_AVAILABLE_IOS(6_0); /* Output on a Bluetooth A2DP device */
AVF_EXPORT NSString *const AVAudioSessionPortBuiltInReceiver NS_AVAILABLE_IOS(6_0); /* The speaker you hold to your ear when on a phone call */
AVF_EXPORT NSString *const AVAudioSessionPortBuiltInSpeaker NS_AVAILABLE_IOS(6_0); /* Built-in speaker on an iOS device */
AVF_EXPORT NSString *const AVAudioSessionPortHDMI NS_AVAILABLE_IOS(6_0); /* Output via High-Definition Multimedia Interface */
AVF_EXPORT NSString *const AVAudioSessionPortAirPlay NS_AVAILABLE_IOS(6_0); /* Output on a remote AirPlay device */
AVF_EXPORT NSString *const AVAudioSessionPortBluetoothLE NS_AVAILABLE_IOS(7_0); /* Output on a Bluetooth Low Energy device */
/* port types that refer to either input or output */
AVF_EXPORT NSString *const AVAudioSessionPortBluetoothHFP NS_AVAILABLE_IOS(6_0); /* Input or output on a Bluetooth Hands-Free Profile device */
AVF_EXPORT NSString *const AVAudioSessionPortUSBAudio NS_AVAILABLE_IOS(6_0); /* Input or output on a Universal Serial Bus device */
AVF_EXPORT NSString *const AVAudioSessionPortCarAudio NS_AVAILABLE_IOS(7_0); /* Input or output via Car Audio */
#pragma mark -- constants for data source locations, orientations, polar patterns, and channel roles --
/* The following represent the location of a data source on an iOS device. */
AVF_EXPORT NSString *const AVAudioSessionLocationUpper NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionLocationLower NS_AVAILABLE_IOS(7_0);
/* The following represent the orientation or directionality of a data source on an iOS device. */
AVF_EXPORT NSString *const AVAudioSessionOrientationTop NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionOrientationBottom NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionOrientationFront NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionOrientationBack NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionOrientationLeft NS_AVAILABLE_IOS(8_0);
AVF_EXPORT NSString *const AVAudioSessionOrientationRight NS_AVAILABLE_IOS(8_0);
/* The following represent the possible polar patterns for a data source on an iOS device. */
AVF_EXPORT NSString *const AVAudioSessionPolarPatternOmnidirectional NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionPolarPatternCardioid NS_AVAILABLE_IOS(7_0);
AVF_EXPORT NSString *const AVAudioSessionPolarPatternSubcardioid NS_AVAILABLE_IOS(7_0);
#pragma mark -- helper class interfaces --
/*
AVAudioSessionChannelDescription objects provide information about a port's audio channels.
AudioQueues, AURemoteIO and AUVoiceIO instances can be assigned to communicate with specific
hardware channels by setting an array of <port UID, channel index> pairs.
*/
NS_CLASS_AVAILABLE(NA, 6_0)
@interface AVAudioSessionChannelDescription : NSObject {
@private
void *_impl;
}
@property(readonly) NSString * channelName;
@property(readonly) NSString * owningPortUID; /* the unique identifier (UID) for the channel's owning port */
@property(readonly) NSUInteger channelNumber; /* the index of this channel in its owning port's array of channels */
@property(readonly) AudioChannelLabel channelLabel; /* description of the physical location of this channel. */
@end
NS_CLASS_AVAILABLE(NA, 6_0)
@interface AVAudioSessionPortDescription : NSObject {
@private
void *_impl;
}
/* Value is one of the AVAudioSessionPort constants declared above. */
@property (readonly) NSString *portType;
/* A descriptive name for the port */
@property (readonly) NSString *portName;
/* A system-assigned unique identifier for the port */
@property (readonly) NSString *UID;
/* This property's value will be true if the associated hardware port has built-in processing for two-way
voice communication. Applications that use their own proprietary voice processing algorithms should use
this property to decide when to disable processing. On the other hand, if using Apple's Voice Processing
I/O unit (subtype kAudioUnitSubType_VoiceProcessingIO), the system will automatically manage this for the
application. In particular, ports of type AVAudioSessionPortBluetoothHFP and AVAudioSessionPortCarAudio
often have hardware voice processing. */
@property (readonly) BOOL hasHardwareVoiceCallProcessing API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0));
@property (readonly, nullable) NSArray<AVAudioSessionChannelDescription *> *channels;
/* Will be nil if there are no selectable data sources. */
@property (readonly, nullable) NSArray<AVAudioSessionDataSourceDescription *> *dataSources NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Will be nil if there are no selectable data sources. In all other cases, this
property reflects the currently selected data source.*/
@property (readonly, nullable) AVAudioSessionDataSourceDescription *selectedDataSource NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* This property reflects the application's preferred data source for the Port.
Will be nil if there are no selectable data sources or if no preference has been set.*/
@property (readonly, nullable) AVAudioSessionDataSourceDescription *preferredDataSource NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Select the preferred data source for this port. The input dataSource parameter must be one of the dataSources exposed by
the dataSources property. Setting a nil value will clear the preference.
Note: if the port is part of the active audio route, changing the data source will likely
result in a route reconfiguration. If the port is not part of the active route, selecting a new data source will
not result in an immediate route reconfiguration. Use AVAudioSession's setPreferredInput:error: method to activate the port. */
- (BOOL)setPreferredDataSource:(nullable AVAudioSessionDataSourceDescription *)dataSource error:(NSError **)outError NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
@end
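
/* A minimal usage sketch of the port and data-source APIs above: prefer the
   front-facing data source of the built-in microphone. Assumes an active iOS
   audio session; error handling is abbreviated. */
static void PreferFrontBuiltInMicDataSource(void) {
    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSError *error = nil;
    for (AVAudioSessionPortDescription *port in session.availableInputs) {
        if (![port.portType isEqualToString:AVAudioSessionPortBuiltInMic])
            continue;
        for (AVAudioSessionDataSourceDescription *source in port.dataSources) {
            if ([source.orientation isEqualToString:AVAudioSessionOrientationFront]) {
                [port setPreferredDataSource:source error:&error]; // sets the preference only
                [session setPreferredInput:port error:&error];     // activates the port
            }
        }
    }
}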
NS_CLASS_AVAILABLE(NA, 6_0)
@interface AVAudioSessionRouteDescription : NSObject {
@private
void *_impl;
}
@property (readonly) NSArray<AVAudioSessionPortDescription *> *inputs;
@property (readonly) NSArray<AVAudioSessionPortDescription *> *outputs;
@end
NS_CLASS_AVAILABLE(NA, 6_0)
@interface AVAudioSessionDataSourceDescription : NSObject {
@private
void *_impl;
}
/* system-assigned ID for the data source */
@property (readonly) NSNumber *dataSourceID;
/* human-readable name for the data source */
@property (readonly) NSString *dataSourceName;
/* Location and orientation can be used to distinguish between multiple data sources belonging to a single port. For example, in the case of a port of type AVAudioSessionPortBuiltInMic, one can
use these properties to differentiate between an upper/front-facing microphone and a lower/bottom-facing microphone. */
/* Describes the general location of a data source. Will be nil for data sources for which the location is not known. */
@property (readonly, nullable) NSString *location NS_AVAILABLE_IOS(7_0);
/* Describes the orientation of a data source. Will be nil for data sources for which the orientation is not known. */
@property (readonly, nullable) NSString *orientation NS_AVAILABLE_IOS(7_0);
/* Array of one or more NSStrings describing the supported polar patterns for a data source. Will be nil for data sources that have no selectable patterns. */
@property (readonly, nullable) NSArray<NSString *> *supportedPolarPatterns NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Describes the currently selected polar pattern. Will be nil for data sources that have no selectable patterns. */
@property (readonly, nullable) NSString *selectedPolarPattern NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Describes the preferred polar pattern. Will be nil for data sources that have no selectable patterns or if no preference has been set. */
@property (readonly, nullable) NSString *preferredPolarPattern NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
/* Select the desired polar pattern from the set of available patterns. Setting a nil value will clear the preference.
Note: if the owning port and data source are part of the active audio route,
changing the polar pattern will likely result in a route reconfiguration. If the owning port and data source are not part of the active route,
selecting a polar pattern will not result in an immediate route reconfiguration. Use AVAudioSession's setPreferredInput:error: method
to activate the port. Use setPreferredDataSource:error: to activate the data source on the port. */
- (BOOL)setPreferredPolarPattern:(nullable NSString *)pattern error:(NSError **)outError NS_AVAILABLE_IOS(7_0) __WATCHOS_PROHIBITED;
@end
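
/* A companion sketch for polar-pattern selection, assuming `source` came from
   a port's dataSources array on hardware with selectable patterns: */
static void PreferCardioidPattern(AVAudioSessionDataSourceDescription *source) {
    if ([source.supportedPolarPatterns containsObject:AVAudioSessionPolarPatternCardioid]) {
        NSError *error = nil;
        [source setPreferredPolarPattern:AVAudioSessionPolarPatternCardioid error:&error];
    }
}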
#pragma mark -- Deprecated API --
@interface AVAudioSession (AVAudioSessionDeprecated)
/* The delegate property is deprecated. Instead, you should register for the NSNotifications named below. */
/* For example:
[[NSNotificationCenter defaultCenter] addObserver: myObject
selector: @selector(handleInterruption:)
name: AVAudioSessionInterruptionNotification
object: [AVAudioSession sharedInstance]];
*/
@property (assign, nullable) id<AVAudioSessionDelegate> delegate NS_DEPRECATED_IOS(4_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
/* AVAudioSession is a singleton. Use +sharedInstance instead of -init */
- (instancetype)init NS_DEPRECATED_IOS(3_0, 10_0) __WATCHOS_PROHIBITED;
- (BOOL)setActive:(BOOL)active withFlags:(NSInteger)flags error:(NSError **)outError NS_DEPRECATED_IOS(4_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
@property (readonly) BOOL inputIsAvailable NS_DEPRECATED_IOS(3_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED; /* is input hardware available or not? */
/* deprecated. Use the corresponding properties without "Hardware" in their names. */
@property (readonly) double currentHardwareSampleRate NS_DEPRECATED_IOS(3_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
@property (readonly) NSInteger currentHardwareInputNumberOfChannels NS_DEPRECATED_IOS(3_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
@property (readonly) NSInteger currentHardwareOutputNumberOfChannels NS_DEPRECATED_IOS(3_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
- (BOOL)setPreferredHardwareSampleRate:(double)sampleRate error:(NSError **)outError NS_DEPRECATED_IOS(3_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
@property (readonly) double preferredHardwareSampleRate NS_DEPRECATED_IOS(3_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
@end
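
/* A sketch of the notification-based replacement for the deprecated delegate,
   using the AVAudioSession interruption userInfo keys; observer lifetime
   management is omitted: */
static id<NSObject> ObserveInterruptions(void) {
    return [[NSNotificationCenter defaultCenter]
        addObserverForName:AVAudioSessionInterruptionNotification
                    object:[AVAudioSession sharedInstance]
                     queue:[NSOperationQueue mainQueue]
                usingBlock:^(NSNotification *note) {
            NSUInteger type = [note.userInfo[AVAudioSessionInterruptionTypeKey] unsignedIntegerValue];
            if (type == AVAudioSessionInterruptionTypeBegan) {
                // Pause playback; the system has interrupted the session.
            } else if (type == AVAudioSessionInterruptionTypeEnded) {
                NSUInteger opts = [note.userInfo[AVAudioSessionInterruptionOptionKey] unsignedIntegerValue];
                if (opts & AVAudioSessionInterruptionOptionShouldResume) {
                    [[AVAudioSession sharedInstance] setActive:YES error:nil];
                    // Resume playback here.
                }
            }
        }];
}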
#pragma mark -- AVAudioSessionDelegate protocol --
/* The AVAudioSessionDelegate protocol is deprecated. Instead you should register for notifications. */
__TVOS_PROHIBITED __WATCHOS_PROHIBITED
@protocol AVAudioSessionDelegate <NSObject>
@optional
- (void)beginInterruption; /* something has caused your audio session to be interrupted */
/* the interruption is over */
- (void)endInterruptionWithFlags:(NSUInteger)flags NS_AVAILABLE_IOS(4_0); /* Currently the only flag is AVAudioSessionInterruptionFlags_ShouldResume. */
- (void)endInterruption; /* endInterruptionWithFlags: will be called instead if implemented. */
/* notification for input becoming available or unavailable */
- (void)inputIsAvailableChanged:(BOOL)isInputAvailable;
@end
#pragma mark -- Deprecated enumerations --
/* Deprecated in iOS 6.0. Use AVAudioSessionInterruptionOptions instead.
Flags passed to you when endInterruptionWithFlags: is called on the delegate */
enum {
AVAudioSessionInterruptionFlags_ShouldResume = 1
} NS_DEPRECATED_IOS(4_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
/* Deprecated in iOS 6.0. Use AVAudioSessionSetActiveOptions instead.
flags for use when calling setActive:withFlags:error: */
enum {
AVAudioSessionSetActiveFlags_NotifyOthersOnDeactivation = 1
} NS_DEPRECATED_IOS(4_0, 6_0) __TVOS_PROHIBITED __WATCHOS_PROHIBITED;
#endif // ! TARGET_OS_OSX
NS_ASSUME_NONNULL_END


@ -1,71 +0,0 @@
/*
File: AVAudioSettings.h
Framework: AVFoundation
Copyright 2008-2013 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/NSObject.h>
#import <Availability.h>
/* This file's methods are available with iPhone 3.0 or later */
/* property keys - values for all keys defined below are NSNumbers */
/* keys for all formats */
AVF_EXPORT NSString *const AVFormatIDKey; /* value is an integer (format ID) from CoreAudioTypes.h */
AVF_EXPORT NSString *const AVSampleRateKey; /* value is floating point in Hertz */
AVF_EXPORT NSString *const AVNumberOfChannelsKey; /* value is an integer */
/* linear PCM keys */
AVF_EXPORT NSString *const AVLinearPCMBitDepthKey; /* value is an integer, one of: 8, 16, 24, 32 */
AVF_EXPORT NSString *const AVLinearPCMIsBigEndianKey; /* value is a BOOL */
AVF_EXPORT NSString *const AVLinearPCMIsFloatKey; /* value is a BOOL */
AVF_EXPORT NSString *const AVLinearPCMIsNonInterleaved NS_AVAILABLE(10_7, 4_0); /* value is a BOOL */
#define AVLinearPCMIsNonInterleavedKey AVLinearPCMIsNonInterleaved
/* audio file type key */
AVF_EXPORT NSString *const AVAudioFileTypeKey API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0)); /* value is an integer (audio file type) from AudioFile.h */
/* encoder property keys */
AVF_EXPORT NSString *const AVEncoderAudioQualityKey; /* value is an integer from enum AVAudioQuality */
AVF_EXPORT NSString *const AVEncoderAudioQualityForVBRKey NS_AVAILABLE(10_9, 7_0); /* value is an integer from enum AVAudioQuality. only relevant for AVAudioBitRateStrategy_Variable */
/* only one of AVEncoderBitRateKey and AVEncoderBitRatePerChannelKey should be provided. */
AVF_EXPORT NSString *const AVEncoderBitRateKey; /* value is an integer. */
AVF_EXPORT NSString *const AVEncoderBitRatePerChannelKey NS_AVAILABLE(10_7, 4_0); /* value is an integer */
AVF_EXPORT NSString *const AVEncoderBitRateStrategyKey NS_AVAILABLE(10_9, 7_0); /* value is an AVAudioBitRateStrategy constant. see below. */
AVF_EXPORT NSString *const AVEncoderBitDepthHintKey; /* value is an integer from 8 to 32 */
/* sample rate converter property keys */
AVF_EXPORT NSString *const AVSampleRateConverterAlgorithmKey NS_AVAILABLE(10_9, 7_0); /* value is an AVSampleRateConverterAlgorithm constant. see below. */
AVF_EXPORT NSString *const AVSampleRateConverterAudioQualityKey; /* value is an integer from enum AVAudioQuality */
/* channel layout */
AVF_EXPORT NSString *const AVChannelLayoutKey NS_AVAILABLE(10_7, 4_0); /* value is an NSData containing an AudioChannelLayout */
/* property values */
/* values for AVEncoderBitRateStrategyKey */
AVF_EXPORT NSString *const AVAudioBitRateStrategy_Constant NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT NSString *const AVAudioBitRateStrategy_LongTermAverage NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT NSString *const AVAudioBitRateStrategy_VariableConstrained NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT NSString *const AVAudioBitRateStrategy_Variable NS_AVAILABLE(10_9, 7_0);
/* values for AVSampleRateConverterAlgorithmKey */
AVF_EXPORT NSString *const AVSampleRateConverterAlgorithm_Normal NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT NSString *const AVSampleRateConverterAlgorithm_Mastering NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT NSString *const AVSampleRateConverterAlgorithm_MinimumPhase API_AVAILABLE(macos(10.12), ios(10.0), watchos(3.0), tvos(10.0));
typedef NS_ENUM(NSInteger, AVAudioQuality) {
AVAudioQualityMin = 0,
AVAudioQualityLow = 0x20,
AVAudioQualityMedium = 0x40,
AVAudioQualityHigh = 0x60,
AVAudioQualityMax = 0x7F
};
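
/* A sketch of a typical settings dictionary built from the keys above; the
   values are illustrative, and outputURL is an assumed destination: */
#import <AVFoundation/AVFoundation.h>
static AVAudioRecorder *MakeAACRecorder(NSURL *outputURL, NSError **outError) {
    NSDictionary *settings = @{
        AVFormatIDKey            : @(kAudioFormatMPEG4AAC), // format ID from CoreAudioTypes.h
        AVSampleRateKey          : @44100.0,                // Hertz
        AVNumberOfChannelsKey    : @2,
        AVEncoderBitRateKey      : @128000,
        AVEncoderAudioQualityKey : @(AVAudioQualityHigh),
    };
    return [[AVAudioRecorder alloc] initWithURL:outputURL settings:settings error:outError];
}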


@ -1,145 +0,0 @@
/*
File: AVAudioTime.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAudioTime
@abstract Represent a moment in time.
@discussion
AVAudioTime is used in AVAudioEngine to represent time. Instances are immutable.
A single moment in time may be represented in two different ways:
1. mach_absolute_time(), the system's basic clock. Commonly referred to as "host time."
2. audio samples at a particular sample rate
A single AVAudioTime instance may contain either or both representations; it might
represent only a sample time, only a host time, or both.
Rationale for using host time:
[a] internally we are using AudioTimeStamp, which uses host time, and it seems silly to divide
[b] it is consistent with a standard system timing service
[c] we do provide conveniences to convert between host ticks and seconds (host time divided by
frequency) so client code wanting to do what should be straightforward time computations can at
least not be cluttered by ugly multiplications and divisions by the host clock frequency.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioTime : NSObject {
@private
AudioTimeStamp _ats;
double _sampleRate;
void *_reserved;
}
/*! @method initWithAudioTimeStamp:sampleRate:
*/
- (instancetype)initWithAudioTimeStamp: (const AudioTimeStamp *)ts sampleRate: (double)sampleRate;
/*! @method initWithHostTime:
*/
- (instancetype)initWithHostTime:(uint64_t)hostTime;
/*! @method initWithSampleTime:atRate:
*/
- (instancetype)initWithSampleTime:(AVAudioFramePosition)sampleTime atRate:(double)sampleRate;
/*! @method initWithHostTime:sampleTime:atRate:
*/
- (instancetype)initWithHostTime:(uint64_t)hostTime sampleTime:(AVAudioFramePosition)sampleTime atRate:(double)sampleRate;
/*! @method timeWithAudioTimeStamp:sampleRate:
*/
+ (instancetype)timeWithAudioTimeStamp: (const AudioTimeStamp *)ts sampleRate: (double)sampleRate;
/*! @method timeWithHostTime:
*/
+ (instancetype)timeWithHostTime:(uint64_t)hostTime;
/*! @method timeWithSampleTime:atRate:
*/
+ (instancetype)timeWithSampleTime:(AVAudioFramePosition)sampleTime atRate:(double)sampleRate;
/*! @method timeWithHostTime:sampleTime:atRate:
*/
+ (instancetype)timeWithHostTime:(uint64_t)hostTime sampleTime:(AVAudioFramePosition)sampleTime atRate:(double)sampleRate;
/*! @method hostTimeForSeconds:
@abstract Convert seconds to host time.
*/
+ (uint64_t)hostTimeForSeconds:(NSTimeInterval)seconds;
/*! @method secondsForHostTime:
@abstract Convert host time to seconds.
*/
+ (NSTimeInterval)secondsForHostTime:(uint64_t)hostTime;
/*! @method extrapolateTimeFromAnchor:
@abstract Converts between host and sample time.
@param anchorTime
An AVAudioTime with a more complete AudioTimeStamp than that of the receiver (self).
@return
the extrapolated time
@discussion
If anchorTime is an AVAudioTime where both host time and sample time are valid,
and self is another timestamp where only one of the two is valid, this method
returns a new AVAudioTime copied from self and where any additional valid fields provided by
the anchor are also valid.
Note that the anchorTime must have both host and sample time valid, and self must have
sample rate and at least one of host or sample time valid. Otherwise this method returns nil.
<pre>
// time0 has a valid audio sample representation, but no host time representation.
AVAudioTime *time0 = [AVAudioTime timeWithSampleTime: 0.0 atRate: 44100.0];
// anchor has a valid host time representation and sample time representation.
AVAudioTime *anchor = [player playerTimeForNodeTime: player.lastRenderTime];
// fill in valid host time representation
AVAudioTime *fullTime0 = [time0 extrapolateTimeFromAnchor: anchor];
</pre>
*/
- (nullable AVAudioTime *)extrapolateTimeFromAnchor:(AVAudioTime *)anchorTime;
/*! @property hostTimeValid
@abstract Whether the hostTime property is valid.
*/
@property (nonatomic, readonly, getter=isHostTimeValid) BOOL hostTimeValid;
/*! @property hostTime
@abstract The host time.
*/
@property (nonatomic, readonly) uint64_t hostTime;
/*! @property sampleTimeValid
@abstract Whether the sampleTime and sampleRate properties are valid.
*/
@property (nonatomic, readonly, getter=isSampleTimeValid) BOOL sampleTimeValid;
/*! @property sampleTime
@abstract The time as a number of audio samples, as tracked by the current audio device.
*/
@property (nonatomic, readonly) AVAudioFramePosition sampleTime;
/*! @property sampleRate
@abstract The sample rate at which sampleTime is being expressed.
*/
@property (nonatomic, readonly) double sampleRate;
/*! @property audioTimeStamp
@abstract The time expressed as an AudioTimeStamp structure.
@discussion
This may be useful for compatibility with lower-level CoreAudio and AudioToolbox API's.
*/
@property (readonly, nonatomic) AudioTimeStamp audioTimeStamp;
@end
NS_ASSUME_NONNULL_END
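
/* A sketch using hostTimeForSeconds: to schedule playback half a second out;
   playerNode is an assumed AVAudioPlayerNode attached to a running engine: */
#import <AVFoundation/AVFoundation.h>
#include <mach/mach_time.h>
static void StartInHalfASecond(AVAudioPlayerNode *playerNode) {
    // Host time is in mach ticks; hostTimeForSeconds: does the unit conversion.
    uint64_t start = mach_absolute_time() + [AVAudioTime hostTimeForSeconds:0.5];
    [playerNode playAtTime:[AVAudioTime timeWithHostTime:start]];
}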


@ -1,178 +0,0 @@
/*
File: AVAudioTypes.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#ifndef __AVAudioTypes_h__
#define __AVAudioTypes_h__
#import <Foundation/Foundation.h>
#import <AVFoundation/AVBase.h>
#import <CoreAudio/CoreAudioTypes.h>
/*! @typedef AVAudioFramePosition
@abstract A position in an audio file or stream.
*/
typedef int64_t AVAudioFramePosition;
/*! @typedef AVAudioFrameCount
@abstract A number of audio sample frames.
@discussion
Rationale: making this a potentially larger-than-32-bit type like NSUInteger would open the
door to a large set of runtime failures due to underlying implementations' use of UInt32.
*/
typedef uint32_t AVAudioFrameCount;
/*! @typedef AVAudioPacketCount
@abstract A number of packets of compressed audio data.
@discussion
Rationale: making this a potentially larger-than-32-bit type like NSUInteger would open the
door to a large set of runtime failures due to underlying implementations' use of UInt32.
*/
typedef uint32_t AVAudioPacketCount;
/*! @typedef AVAudioChannelCount
@abstract A number of audio channels.
@discussion
Rationale: making this a potentially larger-than-32-bit type like NSUInteger would open the
door to a large set of runtime failures due to underlying implementations' use of UInt32.
*/
typedef uint32_t AVAudioChannelCount;
/*! @typedef AVAudioNodeCompletionHandler
@abstract Generic callback handler.
@discussion
Various AVAudioEngine objects issue callbacks to generic blocks of this type. In general
the callback arrives on a non-main thread and it is the client's responsibility to handle it
in a thread-safe manner.
*/
typedef void (^AVAudioNodeCompletionHandler)(void);
/*! @typedef AVAudioNodeBus
@abstract The index of a bus on an AVAudioNode.
@discussion
@link AVAudioNode @/link objects potentially have multiple input and/or output busses.
AVAudioNodeBus represents a bus as a zero-based index.
*/
typedef NSUInteger AVAudioNodeBus;
/*=============================================================================*/
/*! @struct AVAudio3DPoint
@abstract Struct representing a point in 3D space
@discussion
This struct is used by classes dealing with 3D audio such as `AVAudioMixing`
and `AVAudioEnvironmentNode` and represents a point in 3D space.
*/
struct AVAudio3DPoint {
float x;
float y;
float z;
};
typedef struct AVAudio3DPoint AVAudio3DPoint;
/*! @method AVAudioMake3DPoint
@abstract Creates and returns an AVAudio3DPoint object
*/
NS_INLINE AVAudio3DPoint AVAudioMake3DPoint(float x, float y, float z) {
AVAudio3DPoint p;
p.x = x;
p.y = y;
p.z = z;
return p;
}
/*! @typedef AVAudio3DVector
@abstract Struct representing a vector in 3D space
@discussion
This struct is used by classes dealing with 3D audio such as @link AVAudioMixing @/link
and @link AVAudioEnvironmentNode @/link and represents a vector in 3D space.
*/
typedef struct AVAudio3DPoint AVAudio3DVector;
/*! @method AVAudio3DVector
@abstract Creates and returns an AVAudio3DVector object
*/
NS_INLINE AVAudio3DVector AVAudioMake3DVector(float x, float y, float z) {
AVAudio3DVector v;
v.x = x;
v.y = y;
v.z = z;
return v;
}
/*! @struct AVAudio3DVectorOrientation
@abstract Struct representing the orientation of the listener in 3D space
@discussion
Two orthogonal vectors describe the orientation of the listener. The forward
vector points in the direction that the listener is facing. The up vector is orthogonal
to the forward vector and points upwards from the listener's head.
*/
struct AVAudio3DVectorOrientation {
AVAudio3DVector forward;
AVAudio3DVector up;
};
typedef struct AVAudio3DVectorOrientation AVAudio3DVectorOrientation;
/*! @method AVAudioMake3DVectorOrientation
@abstract Creates and returns an AVAudio3DVectorOrientation object
*/
NS_INLINE AVAudio3DVectorOrientation AVAudioMake3DVectorOrientation(AVAudio3DVector forward, AVAudio3DVector up) {
AVAudio3DVectorOrientation o;
o.forward = forward;
o.up = up;
return o;
}
/*! @struct AVAudio3DAngularOrientation
@abstract Struct representing the orientation of the listener in 3D space
@discussion
Three angles describe the orientation of a listener's head - yaw, pitch and roll.
Yaw describes the side to side movement of the listener's head.
The yaw axis is perpendicular to the plane of the listener's ears with its origin at the
center of the listener's head and directed towards the bottom of the listener's head. A
positive yaw is in the clockwise direction going from 0 to 180 degrees. A negative yaw is in
the counter-clockwise direction going from 0 to -180 degrees.
Pitch describes the up-down movement of the listener's head.
The pitch axis is perpendicular to the yaw axis and is parallel to the plane of the
listener's ears with its origin at the center of the listener's head and directed towards
the right ear. A positive pitch is the upwards direction going from 0 to 180 degrees. A
negative pitch is in the downwards direction going from 0 to -180 degrees.
Roll describes the tilt of the listener's head.
The roll axis is perpendicular to the other two axes with its origin at the center of the
listener's head and is directed towards the listener's nose. A positive roll is to the right
going from 0 to 180 degrees. A negative roll is to the left going from 0 to -180 degrees.
*/
struct AVAudio3DAngularOrientation {
float yaw;
float pitch;
float roll;
};
typedef struct AVAudio3DAngularOrientation AVAudio3DAngularOrientation;
/*! @method AVAudioMake3DAngularOrientation
@abstract Creates and returns an AVAudio3DAngularOrientation object
*/
NS_INLINE AVAudio3DAngularOrientation AVAudioMake3DAngularOrientation(float yaw, float pitch, float roll) {
AVAudio3DAngularOrientation o;
o.yaw = yaw;
o.pitch = pitch;
o.roll = roll;
return o;
}
#endif // __AVAudioTypes_h__
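
/* A sketch applying the 3D helpers above to an AVAudioEnvironmentNode
   listener; `environment` is an assumed node attached to an engine: */
#import <AVFoundation/AVFoundation.h>
static void ConfigureListener(AVAudioEnvironmentNode *environment) {
    // Face down the negative Z axis with +Y up, standing at the origin.
    AVAudio3DVector forward = AVAudioMake3DVector(0.0f, 0.0f, -1.0f);
    AVAudio3DVector up      = AVAudioMake3DVector(0.0f, 1.0f, 0.0f);
    environment.listenerVectorOrientation = AVAudioMake3DVectorOrientation(forward, up);
    environment.listenerPosition          = AVAudioMake3DPoint(0.0f, 0.0f, 0.0f);
}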


@ -1,113 +0,0 @@
/*
File: AVAudioUnit.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioNode.h>
#if __has_include(<AudioToolbox/AudioUnit.h>)
#define AVAUDIOUNIT_HAVE_AUDIOUNIT 1
#import <AudioToolbox/AudioUnit.h>
#endif
NS_ASSUME_NONNULL_BEGIN
#if __OBJC2__
@class AUAudioUnit;
#endif // __OBJC2__
/*! @class AVAudioUnit
@abstract An AVAudioNode implemented by an audio unit.
@discussion
An AVAudioUnit is an AVAudioNode implemented by an audio unit. Depending on the type of
the audio unit, audio is processed either in real-time or non real-time.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnit : AVAudioNode
#if AVAUDIOUNIT_HAVE_AUDIOUNIT
/*! @method instantiateWithComponentDescription:options:completionHandler:
@abstract Asynchronously create an instance of an audio unit component, wrapped in an AVAudioUnit.
@param audioComponentDescription
The component to instantiate.
@param options
Instantiation options.
@param completionHandler
Called in an arbitrary thread/queue context when instantiation is complete. The client
should retain the provided AVAudioUnit.
@discussion
Components whose flags include kAudioComponentFlag_RequiresAsyncInstantiation must be
instantiated asynchronously, via this method if they are to be used with AVAudioEngine.
See the discussion of this flag in AudioToolbox/AudioComponent.h.
The returned AVAudioUnit instance normally will be of a subclass (AVAudioUnitEffect,
AVAudioUnitGenerator, AVAudioUnitMIDIInstrument, or AVAudioUnitTimeEffect), selected
according to the component's type.
*/
+ (void)instantiateWithComponentDescription:(AudioComponentDescription)audioComponentDescription options:(AudioComponentInstantiationOptions)options completionHandler:(void (^)(__kindof AVAudioUnit * __nullable audioUnit, NSError * __nullable error))completionHandler NS_AVAILABLE(10_11, 9_0);
/*! @method loadAudioUnitPresetAtURL:error:
@abstract Load an audio unit preset.
@param url
NSURL of the .aupreset file.
@param outError
@discussion
If the .aupreset file cannot be successfully loaded, an error is returned.
*/
- (BOOL)loadAudioUnitPresetAtURL:(NSURL *)url error:(NSError **)outError;
/*! @property audioComponentDescription
@abstract AudioComponentDescription of the underlying audio unit.
*/
@property (nonatomic, readonly) AudioComponentDescription audioComponentDescription;
/*! @property audioUnit
@abstract Reference to the underlying audio unit.
@discussion
A reference to the underlying audio unit is provided so that parameters that are not
exposed by AVAudioUnit subclasses can be modified using the AudioUnit C API.
No operations that may conflict with state maintained by the engine should be performed
directly on the audio unit. These include changing initialization state, stream formats,
channel layouts or connections to other audio units.
*/
@property (nonatomic, readonly) AudioUnit audioUnit;
#if __OBJC2__
/*! @property AUAudioUnit
@abstract An AUAudioUnit wrapping or underlying the implementation's AudioUnit.
@discussion
This provides an AUAudioUnit which either wraps or underlies the implementation's
AudioUnit, depending on how that audio unit is packaged. Applications can interact with this
AUAudioUnit to control custom properties, select presets, change parameters, etc.
As with the audioUnit property, no operations that may conflict with state maintained by the
engine should be performed directly on the audio unit. These include changing initialization
state, stream formats, channel layouts or connections to other audio units.
*/
@property (nonatomic, readonly) AUAudioUnit *AUAudioUnit NS_AVAILABLE(10_11, 9_0);
#endif // __OBJC2__
/*! @property name
@abstract Name of the audio unit.
*/
@property (nonatomic, readonly) NSString *name;
/*! @property manufacturerName
@abstract Manufacturer name of the audio unit.
*/
@property (nonatomic, readonly) NSString *manufacturerName;
/*! @property version
@abstract Version number of the audio unit.
*/
@property (nonatomic, readonly) NSUInteger version;
#endif //AVAUDIOUNIT_HAVE_AUDIOUNIT
@end
NS_ASSUME_NONNULL_END
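
/* A sketch of asynchronous instantiation; the component description values
   are illustrative, and the unit is kept alive by attaching it to an engine: */
#import <AVFoundation/AVFoundation.h>
static void InstantiateDelayOutOfProcess(AVAudioEngine *engine) {
    AudioComponentDescription desc = {
        .componentType         = kAudioUnitType_Effect,
        .componentSubType      = kAudioUnitSubType_Delay,
        .componentManufacturer = kAudioUnitManufacturer_Apple,
    };
    [AVAudioUnit instantiateWithComponentDescription:desc
                                             options:kAudioComponentInstantiation_LoadOutOfProcess
                                   completionHandler:^(__kindof AVAudioUnit *audioUnit, NSError *error) {
        if (audioUnit == nil) { NSLog(@"instantiation failed: %@", error); return; }
        [engine attachNode:audioUnit]; // retains the unit; connect it as needed
    }];
}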


@ -1,248 +0,0 @@
/*
File: AVAudioUnitComponent.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioTypes.h>
#if __has_include(<AudioToolbox/AudioComponent.h>)
#define AVAUDIOUNITCOMPONENT_HAVE_AUDIOCOMPONENT 1
#import <AudioToolbox/AudioComponent.h>
#import <AudioToolbox/AUComponent.h>
#endif
NS_ASSUME_NONNULL_BEGIN
// Standard Audio Unit Types
AVF_EXPORT NSString * const AVAudioUnitTypeOutput NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeMusicDevice NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeMusicEffect NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeFormatConverter NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeEffect NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeMixer NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypePanner NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeGenerator NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeOfflineEffect NS_AVAILABLE(10_10, 9_0);
AVF_EXPORT NSString * const AVAudioUnitTypeMIDIProcessor NS_AVAILABLE(10_10, 9_0);
// Standard Audio Unit Manufacturers
AVF_EXPORT NSString * const AVAudioUnitManufacturerNameApple NS_AVAILABLE(10_10, 9_0);
#pragma mark AVAudioUnitComponent
/*!
@class AVAudioUnitComponent
@discussion
AVAudioUnitComponent provides details about an audio unit such as type, subtype, manufacturer,
location etc. User tags can be added to the AVAudioUnitComponent which can be queried later
for display.
*/
NS_CLASS_AVAILABLE(10_10, 9_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitComponent : NSObject
{
void *_impl;
}
/*! @property name
@abstract the name of an audio component
*/
@property (nonatomic, readonly) NSString *name;
/*! @property typeName
@abstract standard audio component types returned as strings
*/
@property (nonatomic, readonly) NSString *typeName;
/*! @property localizedTypeName
@abstract localized string of typeName for display
*/
@property (nonatomic, readonly) NSString *localizedTypeName;
/*! @property manufacturerName
@abstract the manufacturer name, extracted from the manufacturer key defined in Info.plist dictionary
*/
@property (nonatomic, readonly) NSString *manufacturerName;
/*! @property version
@abstract version number comprised of a hexadecimal number with major, minor, dot-release format: 0xMMMMmmDD
*/
@property (nonatomic, readonly) NSUInteger version;
/*! @property versionString
@abstract version number as string
*/
@property (nonatomic, readonly) NSString *versionString;
/*! @property componentURL
@abstract URL representing location of component
*/
@property (nonatomic, readonly, nullable) NSURL *componentURL NS_DEPRECATED(10_10, 10_11, NA, NA);
/*! @property availableArchitectures
@abstract NSArray of NSNumbers each of which corresponds to one of the constants in Mach-O Architecture in NSBundle Class Reference
*/
@property (nonatomic, readonly) NSArray<NSNumber *> *availableArchitectures NS_AVAILABLE(10_10, NA);
/*! @property sandboxSafe
@abstract On OSX, YES if the AudioComponent can be loaded into a sandboxed process, otherwise NO.
On iOS, this is always YES.
*/
@property (nonatomic, readonly, getter=isSandboxSafe) BOOL sandboxSafe;
/*! @property hasMIDIInput
@abstract YES if AudioComponent has midi input, otherwise NO
*/
@property (nonatomic, readonly) BOOL hasMIDIInput;
/*! @property hasMIDIOutput
@abstract YES if AudioComponent has midi output, otherwise NO
*/
@property (nonatomic, readonly) BOOL hasMIDIOutput;
#if AVAUDIOUNITCOMPONENT_HAVE_AUDIOCOMPONENT
/*! @property audioComponent
@abstract the audioComponent that can be used in AudioComponent APIs.
*/
@property (nonatomic, readonly) AudioComponent audioComponent;
#endif
/*! @property userTagNames
@abstract User tags represent the tags from the current user.
*/
@property (copy) NSArray<NSString *> *userTagNames NS_AVAILABLE(10_10, NA);
/*! @property allTagNames
@abstract represent the tags from the current user and the system tags defined by AudioComponent.
*/
@property (nonatomic, readonly) NSArray<NSString *> *allTagNames;
#if AVAUDIOUNITCOMPONENT_HAVE_AUDIOCOMPONENT
/*! @property audioComponentDescription
@abstract description of the audio component that can be used in AudioComponent APIs.
*/
@property (nonatomic, readonly) AudioComponentDescription audioComponentDescription;
#endif
/*! @property iconURL
@abstract A URL that will specify the location of an icon file that can be used when presenting UI
for this audio component.
*/
@property (nonatomic, readonly, nullable) NSURL *iconURL NS_AVAILABLE(10_10, NA);
#if !TARGET_OS_IPHONE
/*! @property icon
@abstract An icon representing the component.
@discussion
For a component originating in an app extension, the returned icon will be that of the
application containing the extension.
For components loaded from bundles, the icon will be that of the bundle.
*/
@property (nonatomic, readonly, nullable) NSImage *icon NS_AVAILABLE(10_11, NA);
#endif
/*! @property passesAUVal
@abstract YES if the AudioComponent has passed the AU validation tests, otherwise NO
*/
@property (nonatomic, readonly) BOOL passesAUVal NS_AVAILABLE(10_10, NA);
/*! @property hasCustomView
@abstract YES if the AudioComponent provides custom view, otherwise NO
*/
@property (nonatomic, readonly) BOOL hasCustomView NS_AVAILABLE(10_10, NA);
/*! @property configurationDictionary
@abstract A NSDictionary that contains information describing the capabilities of the AudioComponent.
The specific information depends on the type and the keys are defined in AudioUnitProperties.h
*/
@property (nonatomic, readonly) NSDictionary<NSString *, id> *configurationDictionary NS_AVAILABLE(10_10, NA);
/*! @method supportsNumberInputChannels:outputChannels:
@abstract returns YES if the AudioComponent supports the input/output channel configuration
*/
- (BOOL)supportsNumberInputChannels:(NSInteger)numInputChannels outputChannels:(NSInteger)numOutputChannels NS_AVAILABLE(10_10, NA);
@end
#pragma mark AVAudioUnitComponentManager
/* The notification object is an AVAudioUnitComponent object */
AVF_EXPORT NSString * const AVAudioUnitComponentTagsDidChangeNotification NS_AVAILABLE(10_10, 9_0);
/*!
@class AVAudioUnitComponentManager
@discussion
AVAudioUnitComponentManager is a singleton object that provides an easy way to find
audio components that are registered with the system. It provides methods to search and
query various information about the audio components without opening them.
Currently, only audio components that are audio units can be searched.
The class also supports predefined system tags and arbitrary user tags. Each audio unit can be
tagged as part of its definition. Refer to AudioComponent.h for more details. AudioUnit Hosts
such as Logic or GarageBand can present groupings of audio units based on the tags.
Searching for audio units can be done in various ways
- using a NSPredicate that contains search strings for tags or descriptions
- using a block to match on custom criteria
- using an AudioComponentDescription
*/
NS_CLASS_AVAILABLE(10_10, 9_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitComponentManager : NSObject
{
void *_impl;
}
/*! @discussion
returns all tags associated with the current user as well as all system tags defined by
the audio unit(s).
*/
@property (nonatomic, readonly) NSArray<NSString *> *tagNames;
/*! @discussion
returns the localized standard system tags defined by the audio unit(s).
*/
@property (nonatomic, readonly) NSArray<NSString *> *standardLocalizedTagNames;
/* returns singleton instance of AVAudioUnitComponentManager */
+ (instancetype)sharedAudioUnitComponentManager;
/*!
@method componentsMatchingPredicate:
@abstract returns an array of AVAudioUnitComponent objects that match the search predicate.
@discussion
AudioComponent's information or tags can be used to build a search criteria.
For example, "typeName CONTAINS 'Effect'" or tags IN {'Sampler', 'MIDI'}"
*/
- (NSArray<AVAudioUnitComponent *> *)componentsMatchingPredicate:(NSPredicate *)predicate;
/*!
@method componentsPassingTest:
@abstract returns an array of AVAudioUnitComponent objects that pass the user provided block method.
@discussion
For each AudioComponent found by the manager, the block method will be called. If the return
value is YES, the AudioComponent is added to the resulting array; else it will be excluded.
This gives more control to the block provider to filter out the components returned.
*/
- (NSArray<AVAudioUnitComponent *> *)componentsPassingTest:(BOOL(^)(AVAudioUnitComponent *comp, BOOL *stop))testHandler;
#if AVAUDIOUNITCOMPONENT_HAVE_AUDIOCOMPONENT
/*!
@method componentsMatchingDescription:
@abstract returns an array of AVAudioUnitComponent objects that match the description.
@discussion
This method provides a mechanism to search for AudioComponents using AudioComponentDescription
structure. The type, subtype and manufacturer fields are used to search for audio units. A
value of 0 for any of these fields is a wildcard and returns the first match found.
*/
- (NSArray<AVAudioUnitComponent *> *)componentsMatchingDescription:(AudioComponentDescription)desc;
#endif
@end
NS_ASSUME_NONNULL_END
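
/* A sketch of a predicate query against the manager, mirroring the example
   format strings in the discussion above: */
static void LogRegisteredEffects(void) {
    AVAudioUnitComponentManager *manager = [AVAudioUnitComponentManager sharedAudioUnitComponentManager];
    NSPredicate *effects = [NSPredicate predicateWithFormat:@"typeName CONTAINS %@", AVAudioUnitTypeEffect];
    for (AVAudioUnitComponent *component in [manager componentsMatchingPredicate:effects]) {
        NSLog(@"%@ by %@ (sandbox safe: %d)", component.name, component.manufacturerName, component.isSandboxSafe);
    }
}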


@ -1,62 +0,0 @@
/*
File: AVAudioUnitDelay.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitEffect.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioUnitDelay
@abstract an AVAudioUnitEffect that implements a delay effect
@discussion
A delay unit delays the input signal by the specified time interval
and then blends it with the input signal. The amount of high frequency
roll-off can also be controlled in order to simulate the effect of
a tape delay.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitDelay : AVAudioUnitEffect
/*! @property delayTime
@abstract
Time taken by the delayed input signal to reach the output
Range: 0 -> 2
Default: 1
Unit: Seconds
*/
@property (nonatomic) NSTimeInterval delayTime;
/*! @property feedback
@abstract
Amount of the output signal fed back into the delay line
Range: -100 -> 100
Default: 50
Unit: Percent
*/
@property (nonatomic) float feedback;
/*! @property lowPassCutoff
@abstract
Cutoff frequency above which high frequency content is rolled off
Range: 10 -> (samplerate/2)
Default: 15000
Unit: Hertz
*/
@property (nonatomic) float lowPassCutoff;
/*! @property wetDryMix
@abstract
Blend of the wet and dry signals
Range: 0 (all dry) -> 100 (all wet)
Default: 100
Unit: Percent
*/
@property (nonatomic) float wetDryMix;
@end
NS_ASSUME_NONNULL_END
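
/* A sketch wiring the delay between a player and the main mixer; the
   parameter values are illustrative, and engine setup is abbreviated: */
#import <AVFoundation/AVFoundation.h>
static void InsertTapeStyleDelay(AVAudioEngine *engine, AVAudioPlayerNode *player) {
    AVAudioUnitDelay *delay = [[AVAudioUnitDelay alloc] init];
    delay.delayTime     = 0.35;    // seconds
    delay.feedback      = 40.0;    // percent fed back into the delay line
    delay.lowPassCutoff = 12000.0; // Hz; rolls off highs for a tape feel
    delay.wetDryMix     = 30.0;    // percent wet
    [engine attachNode:delay];
    [engine connect:player to:delay format:nil];
    [engine connect:delay to:engine.mainMixerNode format:nil];
}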


@ -1,71 +0,0 @@
/*
File: AVAudioUnitDistortion.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitEffect.h>
NS_ASSUME_NONNULL_BEGIN
typedef NS_ENUM(NSInteger, AVAudioUnitDistortionPreset) {
AVAudioUnitDistortionPresetDrumsBitBrush = 0,
AVAudioUnitDistortionPresetDrumsBufferBeats = 1,
AVAudioUnitDistortionPresetDrumsLoFi = 2,
AVAudioUnitDistortionPresetMultiBrokenSpeaker = 3,
AVAudioUnitDistortionPresetMultiCellphoneConcert = 4,
AVAudioUnitDistortionPresetMultiDecimated1 = 5,
AVAudioUnitDistortionPresetMultiDecimated2 = 6,
AVAudioUnitDistortionPresetMultiDecimated3 = 7,
AVAudioUnitDistortionPresetMultiDecimated4 = 8,
AVAudioUnitDistortionPresetMultiDistortedFunk = 9,
AVAudioUnitDistortionPresetMultiDistortedCubed = 10,
AVAudioUnitDistortionPresetMultiDistortedSquared = 11,
AVAudioUnitDistortionPresetMultiEcho1 = 12,
AVAudioUnitDistortionPresetMultiEcho2 = 13,
AVAudioUnitDistortionPresetMultiEchoTight1 = 14,
AVAudioUnitDistortionPresetMultiEchoTight2 = 15,
AVAudioUnitDistortionPresetMultiEverythingIsBroken = 16,
AVAudioUnitDistortionPresetSpeechAlienChatter = 17,
AVAudioUnitDistortionPresetSpeechCosmicInterference = 18,
AVAudioUnitDistortionPresetSpeechGoldenPi = 19,
AVAudioUnitDistortionPresetSpeechRadioTower = 20,
AVAudioUnitDistortionPresetSpeechWaves = 21
} NS_ENUM_AVAILABLE(10_10, 8_0);
/*! @class AVAudioUnitDistortion
@abstract An AVAudioUnitEffect that implements a multi-stage distortion effect.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitDistortion : AVAudioUnitEffect
/*! @method loadFactoryPreset:
@abstract Load a distortion preset.
Default: AVAudioUnitDistortionPresetDrumsBitBrush
*/
-(void)loadFactoryPreset:(AVAudioUnitDistortionPreset)preset;
/*! @property preGain
@abstract
Gain applied to the signal before being distorted
Range: -80 -> 20
Default: -6
Unit: dB
*/
@property (nonatomic) float preGain;
/*! @property wetDryMix
@abstract
Blend of the distorted and dry signals
Range: 0 (all dry) -> 100 (all distorted)
Default: 50
Unit: Percent
*/
@property (nonatomic) float wetDryMix;
@end
NS_ASSUME_NONNULL_END


@ -1,166 +0,0 @@
/*
File: AVAudioUnitEQ.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitEffect.h>
NS_ASSUME_NONNULL_BEGIN
/*! @enum AVAudioUnitEQFilterType
@abstract Filter types available to use with AVAudioUnitEQ.
@discussion
Depending on the filter type, a combination of one or all of the filter parameters defined
in AVAudioUnitEQFilterParameters is used to set the filter.
AVAudioUnitEQFilterTypeParametric
Parametric filter based on Butterworth analog prototype.
Required parameters: frequency (center), bandwidth, gain
AVAudioUnitEQFilterTypeLowPass
Simple Butterworth 2nd order low pass filter
Required parameters: frequency (-3 dB cutoff at specified frequency)
AVAudioUnitEQFilterTypeHighPass
Simple Butterworth 2nd order high pass filter
Required parameters: frequency (-3 dB cutoff at specified frequency)
AVAudioUnitEQFilterTypeResonantLowPass
Low pass filter with resonance support (via bandwidth parameter)
Required parameters: frequency (-3 dB cutoff at specified frequency), bandwidth
AVAudioUnitEQFilterTypeResonantHighPass
High pass filter with resonance support (via bandwidth parameter)
Required parameters: frequency (-3 dB cutoff at specified frequency), bandwidth
AVAudioUnitEQFilterTypeBandPass
Band pass filter
Required parameters: frequency (center), bandwidth
AVAudioUnitEQFilterTypeBandStop
Band stop filter (aka "notch filter")
Required parameters: frequency (center), bandwidth
AVAudioUnitEQFilterTypeLowShelf
Low shelf filter
Required parameters: frequency (center), gain
AVAudioUnitEQFilterTypeHighShelf
High shelf filter
Required parameters: frequency (center), gain
AVAudioUnitEQFilterTypeResonantLowShelf
Low shelf filter with resonance support (via bandwidth parameter)
Required parameters: frequency (center), bandwidth, gain
AVAudioUnitEQFilterTypeResonantHighShelf
High shelf filter with resonance support (via bandwidth parameter)
Required parameters: frequency (center), bandwidth, gain
*/
typedef NS_ENUM(NSInteger, AVAudioUnitEQFilterType) {
AVAudioUnitEQFilterTypeParametric = 0,
AVAudioUnitEQFilterTypeLowPass = 1,
AVAudioUnitEQFilterTypeHighPass = 2,
AVAudioUnitEQFilterTypeResonantLowPass = 3,
AVAudioUnitEQFilterTypeResonantHighPass = 4,
AVAudioUnitEQFilterTypeBandPass = 5,
AVAudioUnitEQFilterTypeBandStop = 6,
AVAudioUnitEQFilterTypeLowShelf = 7,
AVAudioUnitEQFilterTypeHighShelf = 8,
AVAudioUnitEQFilterTypeResonantLowShelf = 9,
AVAudioUnitEQFilterTypeResonantHighShelf = 10,
} NS_ENUM_AVAILABLE(10_10, 8_0);
/*! @class AVAudioUnitEQFilterParameters
@abstract Filter parameters used by AVAudioUnitEQ.
@discussion
A standalone instance of AVAudioUnitEQFilterParameters cannot be created. Only an instance
vended out by a source object (e.g. AVAudioUnitEQ) can be used.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAudioUnitEQFilterParameters : NSObject {
@private
void *_impl;
}
- (instancetype)init NS_UNAVAILABLE;
/*! @property filterType
@abstract AVAudioUnitEQFilterType
@discussion
Default: AVAudioUnitEQFilterTypeParametric
*/
@property (nonatomic) AVAudioUnitEQFilterType filterType;
/*! @property frequency
@abstract Frequency in Hertz.
@discussion
Range: 20 -> (SampleRate/2)
Unit: Hertz
*/
@property (nonatomic) float frequency;
/*! @property bandwidth
@abstract Bandwidth in octaves.
@discussion
Range: 0.05 -> 5.0
Unit: Octaves
*/
@property (nonatomic) float bandwidth;
/*! @property gain
@abstract Gain in dB.
@discussion
Range: -96 -> 24
Default: 0
Unit: dB
*/
@property (nonatomic) float gain;
/*! @property bypass
@abstract bypass state of band.
@discussion
Default: YES
*/
@property (nonatomic) BOOL bypass;
@end
/*! @class AVAudioUnitEQ
@abstract An AVAudioUnitEffect that implements a Multi-Band Equalizer.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitEQ : AVAudioUnitEffect
/*! @method initWithNumberOfBands:
@abstract Initialize the EQ with number of bands.
@param numberOfBands
The number of bands created by the EQ.
*/
- (instancetype)initWithNumberOfBands:(NSUInteger)numberOfBands;
/*! @property bands
@abstract Array of AVAudioUnitEQFilterParameters objects.
@discussion
The number of elements in the array is equal to the number of bands.
*/
@property (nonatomic, readonly) NSArray<AVAudioUnitEQFilterParameters *> *bands;
/*! @property globalGain
@abstract Overall gain adjustment applied to the signal.
@discussion
Range: -96 -> 24
Default: 0
Unit: dB
*/
@property (nonatomic) float globalGain;
@end
NS_ASSUME_NONNULL_END
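
/* A sketch of a two-band EQ: a low-shelf boost plus a high-pass rumble
   filter, using the required parameters listed for each filter type above: */
static AVAudioUnitEQ *MakeBassFriendlyEQ(void) {
    AVAudioUnitEQ *eq = [[AVAudioUnitEQ alloc] initWithNumberOfBands:2];
    AVAudioUnitEQFilterParameters *lowShelf = eq.bands[0];
    lowShelf.filterType = AVAudioUnitEQFilterTypeLowShelf;
    lowShelf.frequency  = 120.0; // Hz (center)
    lowShelf.gain       = 4.0;   // dB
    lowShelf.bypass     = NO;    // bands start bypassed by default
    AVAudioUnitEQFilterParameters *highPass = eq.bands[1];
    highPass.filterType = AVAudioUnitEQFilterTypeHighPass;
    highPass.frequency  = 40.0;  // Hz (-3 dB cutoff)
    highPass.bypass     = NO;
    eq.globalGain = -1.0;        // dB of headroom
    return eq;
}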


@ -1,53 +0,0 @@
/*
File: AVAudioUnitEffect.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnit.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioUnitEffect
@abstract an AVAudioUnit that processes audio in real-time
@discussion
An AVAudioUnitEffect represents an audio unit of type kAudioUnitType_Effect,
kAudioUnitType_MusicEffect, kAudioUnitType_Panner, kAudioUnitType_RemoteEffect or
kAudioUnitType_RemoteMusicEffect.
These effects run in real-time and process a given number of audio input
samples to produce the same number of audio output samples. A delay unit is an
example of an effect unit.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitEffect : AVAudioUnit
#if AVAUDIOUNIT_HAVE_AUDIOUNIT
/*! @method initWithAudioComponentDescription:
@abstract Create an AVAudioUnitEffect object.
@param audioComponentDescription
AudioComponentDescription of the audio unit to be instantiated.
@discussion
The componentType must be one of these types
kAudioUnitType_Effect
kAudioUnitType_MusicEffect
kAudioUnitType_Panner
kAudioUnitType_RemoteEffect
kAudioUnitType_RemoteMusicEffect
*/
- (instancetype)initWithAudioComponentDescription:(AudioComponentDescription)audioComponentDescription;
#endif
/*! @property bypass
@abstract Bypass state of the audio unit.
*/
@property (nonatomic) BOOL bypass;
@end
NS_ASSUME_NONNULL_END


@ -1,43 +0,0 @@
/*
File: AVAudioUnitGenerator.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnit.h>
#import <AVFAudio/AVAudioMixing.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioUnitGenerator
@abstract an AVAudioUnit that generates audio output
@discussion
An AVAudioUnitGenerator represents an audio unit of type kAudioUnitType_Generator or
kAudioUnitType_RemoteGenerator.
A generator will have no audio input, but will just produce audio output.
A tone generator is an example of this.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitGenerator : AVAudioUnit <AVAudioMixing>
#if AVAUDIOUNIT_HAVE_AUDIOUNIT
/*! @method initWithAudioComponentDescription:
@abstract Create an AVAudioUnitGenerator object.
@param audioComponentDescription
AudioComponentDescription of the audio unit to be instantiated.
@discussion
The componentType must be kAudioUnitType_Generator or kAudioUnitType_RemoteGenerator
*/
- (instancetype)initWithAudioComponentDescription:(AudioComponentDescription)audioComponentDescription;
#endif
/*! @property bypass
@abstract Bypass state of the audio unit.
*/
@property (nonatomic) BOOL bypass;
@end
NS_ASSUME_NONNULL_END


@ -1,177 +0,0 @@
/*
File: AVAudioUnitMIDIInstrument.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnit.h>
#import <AVFAudio/AVAudioMixing.h>
#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100
#define AVAudioUnitMIDIInstrument_MixingConformance <AVAudioMixing>
#elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 90000
#define AVAudioUnitMIDIInstrument_MixingConformance <AVAudioMixing>
#else
#define AVAudioUnitMIDIInstrument_MixingConformance
#endif
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAudioUnitMIDIInstrument
@abstract Base class for sample synthesizers.
@discussion
This base class represents audio units of type kAudioUnitType_MusicDevice or kAudioUnitType_RemoteInstrument. It can be used in a chain
that processes real-time (live) input and has a general concept of music events, i.e. notes.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitMIDIInstrument : AVAudioUnit AVAudioUnitMIDIInstrument_MixingConformance
#if AVAUDIOUNIT_HAVE_AUDIOUNIT
/*! @method initWithAudioComponentDescription:
@abstract initialize the node with the component description
@param description
audio component description structure that describes the audio component of type kAudioUnitType_MusicDevice
or kAudioUnitType_RemoteInstrument.
*/
- (instancetype)initWithAudioComponentDescription:(AudioComponentDescription)description;
#endif
/*! @method startNote:withVelocity:onChannel:
@abstract sends a MIDI Note On event to the instrument
@param note
the note number (key) to play.
Range: 0 -> 127
@param velocity
specifies the volume with which the note is played.
Range: 0 -> 127
@param channel
the channel number to which the event is sent.
*/
- (void)startNote:(uint8_t)note withVelocity:(uint8_t)velocity onChannel:(uint8_t)channel;
/*! @method stopNote:onChannel:
@abstract sends a MIDI Note Off event to the instrument
@param note
the note number (key) to stop
Range: 0 -> 127
@param channel
the channel number to which the event is sent.
*/
- (void)stopNote:(uint8_t)note onChannel:(uint8_t)channel;
/*! @method sendController:withValue:onChannel:
@abstract send a MIDI controller event to the instrument.
@param controller
a standard MIDI controller number.
Range: 0 -> 127
@param value
value for the controller.
Range: 0 -> 127
@param channel
the channel number to which the event is sent.
*/
- (void)sendController:(uint8_t)controller withValue:(uint8_t)value onChannel:(uint8_t)channel;
/*! @method sendPitchBend:onChannel:
@abstract sends MIDI Pitch Bend event to the instrument.
@param pitchbend
value of the pitchbend
Range: 0 -> 16383
@param channel
the channel number to which the pitch bend message is sent
*/
- (void)sendPitchBend:(uint16_t)pitchbend onChannel:(uint8_t)channel;
/*! @method sendPressure:onChannel:
@abstract sends MIDI channel pressure event to the instrument.
@param pressure
value of the pressure.
Range: 0 -> 127
@param channel
the channel number to which the event is sent.
*/
- (void)sendPressure:(uint8_t)pressure onChannel:(uint8_t)channel;
/*! @method sendPressureForKey:withValue:onChannel:
@abstract sends MIDI Polyphonic key pressure event to the instrument
@param key
the key (note) number to which the pressure event applies
Range: 0 -> 127
@param value
value of the pressure
Range: 0 -> 127
@param channel
channel number to which the event is sent.
*/
- (void)sendPressureForKey:(uint8_t)key withValue:(uint8_t)value onChannel:(uint8_t)channel;
/*! @method sendProgramChange:onChannel:
@abstract sends MIDI Program Change event to the instrument
@param program
the program number.
Range: 0 -> 127
@param channel
channel number to which the event is sent.
@discussion
the instrument will be loaded from the bank that has been previously set by MIDI Bank Select
controller messages (0 and 31). If none has been set, bank 0 will be used.
*/
- (void)sendProgramChange:(uint8_t)program onChannel:(uint8_t)channel;
/*! @method sendProgramChange:bankMSB:bankLSB:onChannel:
@abstract sends MIDI Program Change and Bank Select events to the instrument
@param program
specifies the program (preset) number within the bank to load.
Range: 0 -> 127
@param bankMSB
specifies the most significant byte value for the bank to select.
Range: 0 -> 127
@param bankLSB
specifies the least significant byte value for the bank to select.
Range: 0 -> 127
@param channel
channel number to which the events are sent.
*/
- (void)sendProgramChange:(uint8_t)program bankMSB:(uint8_t)bankMSB bankLSB:(uint8_t)bankLSB onChannel:(uint8_t)channel;
/*! @method sendMIDIEvent:data1:data2:
@abstract sends a MIDI event which contains two data bytes to the instrument.
@param midiStatus
the STATUS value of the MIDI event
@param data1
the first data byte of the MIDI event
@param data2
the second data byte of the MIDI event.
*/
- (void)sendMIDIEvent:(uint8_t)midiStatus data1:(uint8_t)data1 data2:(uint8_t)data2;
/*! @method sendMIDIEvent:data1:
@abstract sends a MIDI event which contains one data byte to the instrument.
@param midiStatus
the STATUS value of the MIDI event
@param data1
the first data byte of the MIDI event
*/
- (void)sendMIDIEvent:(uint8_t)midiStatus data1:(uint8_t)data1;
/*! @method sendMIDISysExEvent:
@abstract sends a MIDI System Exclusive event to the instrument.
@param midiData
an NSData object containing the complete SysEx data, including the start (F0) and termination (F7) bytes.
*/
- (void)sendMIDISysExEvent:(NSData *)midiData;
@end
NS_ASSUME_NONNULL_END


@ -1,55 +0,0 @@
/*
File: AVAudioUnitReverb.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitEffect.h>
NS_ASSUME_NONNULL_BEGIN
typedef NS_ENUM(NSInteger, AVAudioUnitReverbPreset) {
AVAudioUnitReverbPresetSmallRoom = 0,
AVAudioUnitReverbPresetMediumRoom = 1,
AVAudioUnitReverbPresetLargeRoom = 2,
AVAudioUnitReverbPresetMediumHall = 3,
AVAudioUnitReverbPresetLargeHall = 4,
AVAudioUnitReverbPresetPlate = 5,
AVAudioUnitReverbPresetMediumChamber = 6,
AVAudioUnitReverbPresetLargeChamber = 7,
AVAudioUnitReverbPresetCathedral = 8,
AVAudioUnitReverbPresetLargeRoom2 = 9,
AVAudioUnitReverbPresetMediumHall2 = 10,
AVAudioUnitReverbPresetMediumHall3 = 11,
AVAudioUnitReverbPresetLargeHall2 = 12
} NS_ENUM_AVAILABLE(10_10, 8_0);
/*! @class AVAudioUnitReverb
@abstract an AVAudioUnitEffect that implements a reverb
@discussion
A reverb simulates the acoustic characteristics of a particular environment.
Use the different presets to simulate a particular space and blend it in with
the original signal using the wetDryMix parameter.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitReverb : AVAudioUnitEffect
/*! @method loadFactoryPreset:
@abstract load a reverb preset
Default: AVAudioUnitReverbPresetMediumHall
*/
- (void)loadFactoryPreset:(AVAudioUnitReverbPreset)preset;
/*! @property wetDryMix
@abstract
Blend of the wet and dry signals
Range: 0 (all dry) -> 100 (all wet)
Unit: Percent
*/
@property (nonatomic) float wetDryMix;
@end
NS_ASSUME_NONNULL_END


@ -1,103 +0,0 @@
/*
File: AVAudioUnitSampler.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitMIDIInstrument.h>
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAudioUnitSampler
@abstract Apple's sampler audio unit.
@discussion
An AVAudioUnit for Apple's Sampler Audio Unit. The sampler can be configured by loading
instruments from different types of files such as an aupreset, a DLS or SF2 sound bank,
an EXS24 instrument, a single audio file, or an array of audio files.
The output is a single stereo bus.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitSampler : AVAudioUnitMIDIInstrument
/*! @method loadSoundBankInstrumentAtURL:program:bankMSB:bankLSB:error:
@abstract loads a specific instrument from the specified sound bank
@param bankURL
URL for a Soundbank file. The file can be either a DLS bank (.dls) or a SoundFont bank (.sf2).
@param program
program number for the instrument to load
@param bankMSB
MSB for the bank number for the instrument to load. This is usually 0x79 for melodic
instruments and 0x78 for percussion instruments.
@param bankLSB
LSB for the bank number for the instrument to load. This is often 0, and represents the "bank variation".
@param outError
the status of the operation
@discussion
This method reads from file and allocates memory, so it should not be called on a real time thread.
*/
- (BOOL)loadSoundBankInstrumentAtURL:(NSURL *)bankURL program:(uint8_t)program bankMSB:(uint8_t)bankMSB bankLSB:(uint8_t)bankLSB error:(NSError **)outError;
/*! @method loadInstrumentAtURL:error:
@abstract configures the sampler by loading the specified preset file.
@param instrumentURL
URL to the preset file or audio file
@param outError
the status of the operation
@discussion
The file can be of one of the following types: Logic/GarageBand EXS24 instrument,
the Sampler AU's native aupreset, or an audio file (e.g. .caf, .aiff, .wav, .mp3).
If an audio file URL is loaded, it will become the sole sample in a new default instrument.
Any information contained in the file regarding its keyboard placement (e.g. root key,
key range) will be used.
This method reads from file and allocates memory, so it should not be called on a real time thread.
*/
- (BOOL)loadInstrumentAtURL:(NSURL *)instrumentURL error:(NSError **)outError;
/*! @method loadAudioFilesAtURLs:error:
@abstract configures the sampler by loading a set of audio files.
@param audioFiles
array of URLs for audio files to be loaded
@param outError
the status of the operation
@discussion
The audio files are loaded into a new default instrument with each audio file placed
into its own sampler zone. Any information contained in the audio file regarding
their placement on the keyboard (e.g. root key, key range) will be used.
This method reads from file and allocates memory, so it should not be called on a real time thread.
*/
- (BOOL)loadAudioFilesAtURLs:(NSArray<NSURL *> *)audioFiles error:(NSError **)outError;
/*! @property stereoPan
@abstract
adjusts the pan for all the notes played.
Range: -1 -> +1
Default: 0
*/
@property (nonatomic) float stereoPan;
/*! @property masterGain
@abstract
adjusts the gain of all the notes played
Range: -90.0 -> +12 dB
Default: 0 dB
*/
@property (nonatomic) float masterGain;
/*! @property globalTuning
@abstract
adjusts the tuning of all the notes played.
Range: -2400 -> +2400 cents
Default: 0
*/
@property (nonatomic) float globalTuning;
@end
NS_ASSUME_NONNULL_END
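/* Usage sketch (illustrative): loading a General MIDI melodic instrument from
   a SoundFont bank. The bank URL is an assumption; per the discussion above,
   do not call this from a real-time thread. */
#import <AVFAudio/AVFAudio.h>

static BOOL loadMelodicInstrument(AVAudioUnitSampler *sampler, NSURL *bankURL,
                                  NSError **outError) {
    // Bank MSB 0x79 selects melodic instruments; program 0 is typically piano.
    return [sampler loadSoundBankInstrumentAtURL:bankURL
                                         program:0
                                         bankMSB:0x79
                                         bankLSB:0
                                           error:outError];
}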


@@ -1,42 +0,0 @@
/*
File: AVAudioUnitTimeEffect.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnit.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioUnitTimeEffect
@abstract an AVAudioUnit that processes audio in non real-time
@discussion
An AVAudioUnitTimeEffect represents an audio unit of type aufc.
These effects do not process audio in real-time. The varispeed
unit is an example of a time effect unit.
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitTimeEffect : AVAudioUnit
#if AVAUDIOUNIT_HAVE_AUDIOUNIT
/*! @method initWithAudioComponentDescription:
@abstract create an AVAudioUnitTimeEffect object
@param audioComponentDescription
@abstract AudioComponentDescription of the audio unit to be initialized
@discussion
The componentType must be kAudioUnitType_FormatConverter
*/
- (instancetype)initWithAudioComponentDescription:(AudioComponentDescription)audioComponentDescription;
#endif
/*! @property bypass
@abstract bypass state of the audio unit
*/
@property (nonatomic) BOOL bypass;
@end
NS_ASSUME_NONNULL_END


@@ -1,56 +0,0 @@
/*
File: AVAudioUnitTimePitch.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitTimeEffect.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioUnitTimePitch
@abstract an AVAudioUnitTimeEffect that provides good quality time stretching and pitch shifting
@discussion
In this time effect, the playback rate and pitch parameters function independently of each other
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitTimePitch : AVAudioUnitTimeEffect
/*! @property rate
@abstract playback rate of the input signal
Range: 1/32 -> 32.0
Default: 1.0
Unit: Generic
*/
@property (nonatomic) float rate;
/*! @property pitch
@abstract amount by which the input signal is pitch shifted
@discussion
1 octave = 1200 cents
1 musical semitone = 100 cents
Range: -2400 -> 2400
Default: 0.0
Unit: Cents
*/
@property (nonatomic) float pitch;
/*! @property overlap
@abstract amount of overlap between segments of the input audio signal
@discussion
A higher value results in fewer artifacts in the output signal.
This parameter also impacts the amount of CPU used.
Range: 3.0 -> 32.0
Default: 8.0
Unit: Generic
*/
@property (nonatomic) float overlap;
@end
NS_ASSUME_NONNULL_END
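/* Usage sketch (illustrative): because rate and pitch are independent in this
   unit, the input can be pitch-shifted without changing its speed. */
#import <AVFAudio/AVFAudio.h>

static AVAudioUnitTimePitch *makeSemitoneShifter(void) {
    AVAudioUnitTimePitch *timePitch = [[AVAudioUnitTimePitch alloc] init];
    timePitch.rate = 1.0;     // normal playback speed
    timePitch.pitch = 100.0;  // +100 cents = one musical semitone up
    return timePitch;
}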


@@ -1,41 +0,0 @@
/*
File: AVAudioUnitVarispeed.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <AVFAudio/AVAudioUnitTimeEffect.h>
NS_ASSUME_NONNULL_BEGIN
/*! @class AVAudioUnitVarispeed
@abstract an AVAudioUnitTimeEffect that can be used to control the playback rate
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVAudioUnitVarispeed : AVAudioUnitTimeEffect
/*! @property rate
@abstract controls the playback rate of the audio signal
@discussion
Since this unit resamples the input signal, changing the playback rate also changes the pitch.
i.e. changing the rate to 2.0 results in the output audio playing one octave higher.
Similarly, changing the rate to 0.5 results in the output audio playing one octave lower.
The playback rate and pitch can be calculated as
rate = pow(2, cents/1200.0)
pitch in cents = 1200.0 * log2(rate)
where 1 octave = 1200 cents
1 musical semitone = 100 cents
Range: 0.25 -> 4.0
Default: 1.0
Unit: Generic
*/
@property (nonatomic) float rate;
@end
NS_ASSUME_NONNULL_END
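/* Worked example of the rate/pitch relationship documented above; plain C
   math, no audio engine required. */
#include <math.h>

static void varispeedMath(void) {
    float rate  = powf(2.0f, 1200.0f / 1200.0f); // +1200 cents -> rate 2.0 (one octave up)
    float cents = 1200.0f * log2f(0.5f);         // rate 0.5 -> -1200 cents (one octave down)
    (void)rate; (void)cents;
}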


@@ -1,43 +0,0 @@
//
// AVFAudio.h
// Copyright © 2015 Apple. All rights reserved.
//
#import <AVFAudio/AVAudioBuffer.h>
#import <AVFAudio/AVAudioChannelLayout.h>
#import <AVFAudio/AVAudioConnectionPoint.h>
#import <AVFAudio/AVAudioConverter.h>
#import <AVFAudio/AVAudioEngine.h>
#import <AVFAudio/AVAudioEnvironmentNode.h>
#import <AVFAudio/AVAudioFile.h>
#import <AVFAudio/AVAudioFormat.h>
#import <AVFAudio/AVAudioIONode.h>
#import <AVFAudio/AVAudioMixerNode.h>
#import <AVFAudio/AVAudioMixing.h>
#import <AVFAudio/AVAudioNode.h>
#import <AVFAudio/AVAudioPlayer.h>
#import <AVFAudio/AVAudioPlayerNode.h>
#import <AVFAudio/AVAudioRecorder.h>
#import <AVFAudio/AVAudioSequencer.h>
#import <AVFAudio/AVAudioSession.h>
#import <AVFAudio/AVAudioSettings.h>
#import <AVFAudio/AVAudioTime.h>
#import <AVFAudio/AVAudioTypes.h>
#import <AVFAudio/AVAudioUnit.h>
#import <AVFAudio/AVAudioUnitComponent.h>
#import <AVFAudio/AVAudioUnitDelay.h>
#import <AVFAudio/AVAudioUnitDistortion.h>
#import <AVFAudio/AVAudioUnitEQ.h>
#import <AVFAudio/AVAudioUnitEffect.h>
#import <AVFAudio/AVAudioUnitGenerator.h>
#import <AVFAudio/AVAudioUnitMIDIInstrument.h>
#import <AVFAudio/AVAudioUnitReverb.h>
#import <AVFAudio/AVAudioUnitSampler.h>
#import <AVFAudio/AVAudioUnitTimeEffect.h>
#import <AVFAudio/AVAudioUnitTimePitch.h>
#import <AVFAudio/AVAudioUnitVarispeed.h>
#import <AVFAudio/AVMIDIPlayer.h>
#if TARGET_OS_IPHONE
#import <AVFAudio/AVSpeechSynthesis.h>
#endif


@@ -1,94 +0,0 @@
/*
File: AVMIDIPlayer.h
Framework: AVFoundation
Copyright (c) 2014-2015 Apple Inc. All Rights Reserved.
*/
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAudioTime;
/*! @typedef AVMIDIPlayerCompletionHandler
@abstract Generic callback block.
*/
typedef void (^AVMIDIPlayerCompletionHandler)(void);
/*! @class AVMIDIPlayer
@abstract A player for music file formats (MIDI, iMelody).
*/
NS_CLASS_AVAILABLE(10_10, 8_0) __WATCHOS_PROHIBITED
@interface AVMIDIPlayer : NSObject {
@protected
void *_impl;
}
/*! @method initWithContentsOfURL:soundBankURL:error:
@abstract Create a player with the contents of the file specified by the URL.
@discussion
'bankURL' should contain the path to a SoundFont2 or DLS bank to be used
by the MIDI synthesizer. For OSX it can be set to nil for the default,
but for iOS it must always refer to a valid bank file.
*/
- (nullable instancetype)initWithContentsOfURL:(NSURL *)inURL soundBankURL:(NSURL * __nullable)bankURL error:(NSError **)outError;
/*! @method initWithData:soundBankURL:error:
@abstract Create a player with the contents of the data object
@discussion
'bankURL' should contain the path to a SoundFont2 or DLS bank to be used
by the MIDI synthesizer. For OSX it can be set to nil for the default,
but for iOS it must always refer to a valid bank file.
*/
- (nullable instancetype)initWithData:(NSData *)data soundBankURL:(NSURL * __nullable)bankURL error:(NSError **)outError;
/* transport control */
/*! @method prepareToPlay
@abstract Get ready to play the sequence by prerolling all events
@discussion
Happens automatically on play if it has not already been called, but may produce a delay in startup.
*/
- (void)prepareToPlay;
/*! @method play:
@abstract Play the sequence.
*/
- (void)play:(AVMIDIPlayerCompletionHandler __nullable)completionHandler;
/*! @method stop
@abstract Stop playing the sequence.
*/
- (void)stop;
/* properties */
/*! @property duration
@abstract The length of the currently loaded file in seconds.
*/
@property(nonatomic, readonly) NSTimeInterval duration;
/*! @property playing
@abstract Indicates whether or not the player is playing
*/
@property(nonatomic, readonly, getter=isPlaying) BOOL playing;
/*! @property rate
@abstract The playback rate of the player
@discussion
1.0 is normal playback rate. Rate must be > 0.0.
*/
@property (nonatomic) float rate;
/*! @property currentPosition
@abstract The current playback position in seconds
@discussion
Setting this positions the player to the specified time. No range checking on the time value is done.
This can be set while the player is playing, in which case playback will resume at the new time.
*/
@property(nonatomic) NSTimeInterval currentPosition;
@end
NS_ASSUME_NONNULL_END
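/* Usage sketch (illustrative): load and play a MIDI file. The URLs are
   assumptions; on iOS the sound bank URL must reference a valid DLS/SF2 bank,
   per the notes above. */
#import <AVFAudio/AVFAudio.h>

static AVMIDIPlayer *playMIDIFile(NSURL *midiURL, NSURL *bankURL) {
    NSError *error = nil;
    AVMIDIPlayer *player = [[AVMIDIPlayer alloc] initWithContentsOfURL:midiURL
                                                          soundBankURL:bankURL
                                                                 error:&error];
    if (!player) { NSLog(@"MIDI load failed: %@", error); return nil; }
    [player prepareToPlay];   // preroll now to avoid a delay at start of playback
    [player play:^{ NSLog(@"sequence finished"); }];
    return player;
}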


@@ -1,47 +0,0 @@
/*
File: AVAnimation.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
#import <CoreFoundation/CFDate.h>
/*!
@constant AVCoreAnimationBeginTimeAtZero
@discussion Use this constant to set a CoreAnimation animation's beginTime property to time 0.
The constant is a small, non-zero, positive value that prevents CoreAnimation
from replacing 0.0 with CACurrentMediaTime().
*/
AVF_EXPORT const CFTimeInterval AVCoreAnimationBeginTimeAtZero NS_AVAILABLE(10_7, 4_0);
typedef NSString * AVLayerVideoGravity NS_STRING_ENUM;
/*!
@constant AVLayerVideoGravityResizeAspect
@abstract Preserve aspect ratio; fit within layer bounds.
@discussion AVLayerVideoGravityResizeAspect may be used when setting the videoGravity
property of an AVPlayerLayer or AVCaptureVideoPreviewLayer instance.
*/
AVF_EXPORT AVLayerVideoGravity const AVLayerVideoGravityResizeAspect NS_AVAILABLE(10_7, 4_0);
/*!
@constant AVLayerVideoGravityResizeAspectFill
@abstract Preserve aspect ratio; fill layer bounds.
@discussion AVLayerVideoGravityResizeAspectFill may be used when setting the videoGravity
property of an AVPlayerLayer or AVCaptureVideoPreviewLayer instance.
*/
AVF_EXPORT AVLayerVideoGravity const AVLayerVideoGravityResizeAspectFill NS_AVAILABLE(10_7, 4_0);
/*!
@constant AVLayerVideoGravityResize
@abstract Stretch to fill layer bounds.
@discussion AVLayerVideoGravityResize may be used when setting the videoGravity
property of an AVPlayerLayer or AVCaptureVideoPreviewLayer instance.
*/
AVF_EXPORT AVLayerVideoGravity const AVLayerVideoGravityResize NS_AVAILABLE(10_7, 4_0);
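/* Usage sketch (illustrative): applying one of the gravity constants above to
   an AVPlayerLayer assumed to be created elsewhere. */
#import <AVFoundation/AVFoundation.h>

static void fillLayerPreservingAspect(AVPlayerLayer *playerLayer) {
    // Fill the layer's bounds while preserving the video's aspect ratio.
    playerLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
}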


@@ -1,771 +0,0 @@
/*
File: AVAsset.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
#import <AVFoundation/AVAsynchronousKeyValueLoading.h>
#import <AVFoundation/AVContentKeySession.h>
#import <AVFoundation/AVMediaFormat.h>
#import <AVFoundation/AVMetadataFormat.h>
#import <CoreGraphics/CGAffineTransform.h>
#import <CoreMedia/CMTime.h>
#pragma mark --- AVAsset ---
/*!
@class AVAsset
@abstract
An AVAsset is an abstract class that defines AVFoundation's model for timed audiovisual media.
Each asset contains a collection of tracks that are intended to be presented or processed together, each of a uniform media type, including but not limited to audio, video, text, closed captions, and subtitles.
@discussion
AVAssets are often instantiated via its concrete subclass AVURLAsset with NSURLs that refer to audiovisual media resources, such as streams (including HTTP live streams), QuickTime movie files, MP3 files, and files of other types.
They can also be instantiated using other concrete subclasses that extend the basic model for audiovisual media in useful ways, as AVComposition does for temporal editing.
Properties of assets as a whole are defined by AVAsset. Additionally, references to instances of AVAssetTracks representing tracks of the collection can be obtained, so that each of these can be examined independently.
Because of the nature of timed audiovisual media, upon successful initialization of an AVAsset some or all of the values for its keys may not be immediately available. The value of any key can be requested at any time, and AVAsset will always return its value synchronously, although it may have to block the calling thread in order to do so.
In order to avoid blocking, clients can register their interest in particular keys and become notified when their values become available. For further details, see AVAsynchronousKeyValueLoading.h.
On iOS, it is particularly important to avoid blocking. To preserve responsiveness, a synchronous request that blocks for too long (e.g., a property request on an asset on a slow HTTP server) may lead to media services being reset.
To play an instance of AVAsset, initialize an instance of AVPlayerItem with it, use the AVPlayerItem to set up its presentation state (such as whether only a limited timeRange of the asset should be played, etc.), and provide the AVPlayerItem to an AVPlayer according to whether the item is to be played by itself or together with a collection of other items. Full details available in AVPlayerItem.h and AVPlayer.h.
AVAssets can also be inserted into AVMutableCompositions in order to assemble audiovisual constructs from one or more source assets.
*/
NS_ASSUME_NONNULL_BEGIN
@class AVAssetTrack;
@class AVFragmentedAssetTrack;
@class AVMetadataItem;
@class AVMediaSelection;
@class AVCompositionTrack;
@class AVAssetInternal;
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAsset : NSObject <NSCopying, AVAsynchronousKeyValueLoading>
{
@private
AVAssetInternal *_asset;
}
/*!
@method assetWithURL:
@abstract Returns an instance of AVAsset for inspection of a media resource.
@param URL
An instance of NSURL that references a media resource.
@result An instance of AVAsset.
@discussion Returns a newly allocated instance of a subclass of AVAsset initialized with the specified URL.
*/
+ (instancetype)assetWithURL:(NSURL *)URL;
/* Indicates the duration of the asset. If @"providesPreciseDurationAndTiming" is NO, a best-available estimate of the duration is returned. The degree of precision preferred for timing-related properties can be set at initialization time for assets initialized with URLs. See AVURLAssetPreferPreciseDurationAndTimingKey for AVURLAsset below.
*/
@property (nonatomic, readonly) CMTime duration;
/* indicates the natural rate at which the asset is to be played; often but not always 1.0
*/
@property (nonatomic, readonly) float preferredRate;
/* indicates the preferred volume at which the audible media of an asset is to be played; often but not always 1.0
*/
@property (nonatomic, readonly) float preferredVolume;
/* indicates the preferred transform to apply to the visual content of the asset for presentation or processing; the value is often but not always the identity transform
*/
@property (nonatomic, readonly) CGAffineTransform preferredTransform;
/* The following property is deprecated. Instead, use the naturalSize and preferredTransform, as appropriate, of the receiver's video tracks. See -tracksWithMediaType: below.
*/
@property (nonatomic, readonly) CGSize naturalSize NS_DEPRECATED(10_7, 10_8, 4_0, 5_0);
@end
@interface AVAsset (AVAssetAsynchronousLoading)
/* Indicates that the asset provides precise timing. See @"duration" above and AVURLAssetPreferPreciseDurationAndTimingKey below.
*/
@property (nonatomic, readonly) BOOL providesPreciseDurationAndTiming;
/*!
@method cancelLoading
@abstract Cancels the loading of all values for all observers.
@discussion Deallocation or finalization of an instance of AVAsset will implicitly cancel loading if any loading requests are still outstanding.
*/
- (void)cancelLoading;
@end
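/* Usage sketch (illustrative) of the non-blocking loading pattern described
   above, using the AVAsynchronousKeyValueLoading methods that AVAsset adopts. */
#import <AVFoundation/AVFoundation.h>

static void inspectDuration(NSURL *url) {
    AVAsset *asset = [AVAsset assetWithURL:url];
    [asset loadValuesAsynchronouslyForKeys:@[@"duration"]
                         completionHandler:^{
        NSError *error = nil;
        if ([asset statusOfValueForKey:@"duration" error:&error] == AVKeyValueStatusLoaded) {
            // Safe to read without blocking the calling thread.
            NSLog(@"duration: %.2fs", CMTimeGetSeconds(asset.duration));
        }
    }];
}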
@interface AVAsset (AVAssetReferenceRestrictions)
/*!
@enum AVAssetReferenceRestrictions
@abstract These constants can be passed in to AVURLAssetReferenceRestrictionsKey to control the resolution of references to external media data.
@constant AVAssetReferenceRestrictionForbidNone
Indicates that all types of references should be followed.
@constant AVAssetReferenceRestrictionForbidRemoteReferenceToLocal
Indicates that references from a remote asset (e.g. referenced via http URL) to local media data (e.g. stored in a local file) should not be followed.
@constant AVAssetReferenceRestrictionForbidLocalReferenceToRemote
Indicates that references from a local asset to remote media data should not be followed.
@constant AVAssetReferenceRestrictionForbidCrossSiteReference
Indicates that references from a remote asset to remote media data stored at a different site should not be followed.
@constant AVAssetReferenceRestrictionForbidLocalReferenceToLocal
Indicates that references from a local asset to local media data stored outside the asset's container file should not be followed.
@constant AVAssetReferenceRestrictionForbidAll
Indicates that only references to media data stored within the asset's container file should be allowed.
*/
typedef NS_OPTIONS(NSUInteger, AVAssetReferenceRestrictions) {
AVAssetReferenceRestrictionForbidNone = 0UL,
AVAssetReferenceRestrictionForbidRemoteReferenceToLocal = (1UL << 0),
AVAssetReferenceRestrictionForbidLocalReferenceToRemote = (1UL << 1),
AVAssetReferenceRestrictionForbidCrossSiteReference = (1UL << 2),
AVAssetReferenceRestrictionForbidLocalReferenceToLocal = (1UL << 3),
AVAssetReferenceRestrictionForbidAll = 0xFFFFUL,
};
/*!
@property referenceRestrictions
@abstract Indicates the reference restrictions being used by the receiver.
@discussion
For AVURLAsset, this property reflects the value passed in for AVURLAssetReferenceRestrictionsKey, if any. See AVURLAssetReferenceRestrictionsKey below for a full discussion of reference restrictions. The default value for this property is AVAssetReferenceRestrictionForbidNone.
*/
@property (nonatomic, readonly) AVAssetReferenceRestrictions referenceRestrictions NS_AVAILABLE(10_7, 5_0);
@end
@class AVAssetTrackGroup;
@interface AVAsset (AVAssetTrackInspection)
/*!
@property tracks
@abstract Provides the array of AVAssetTracks contained by the asset
*/
@property (nonatomic, readonly) NSArray<AVAssetTrack *> *tracks;
/*!
@method trackWithTrackID:
@abstract Provides an instance of AVAssetTrack that represents the track of the specified trackID.
@param trackID
The trackID of the requested AVAssetTrack.
@result An instance of AVAssetTrack; may be nil if no track of the specified trackID is available.
@discussion Becomes callable without blocking when the key @"tracks" has been loaded
*/
- (nullable AVAssetTrack *)trackWithTrackID:(CMPersistentTrackID)trackID;
/*!
@method tracksWithMediaType:
@abstract Provides an array of AVAssetTracks of the asset that present media of the specified media type.
@param mediaType
The media type according to which AVAsset filters its AVAssetTracks. (Media types are defined in AVMediaFormat.h.)
@result An NSArray of AVAssetTracks; may be empty if no tracks of the specified media type are available.
@discussion Becomes callable without blocking when the key @"tracks" has been loaded
*/
- (NSArray<AVAssetTrack *> *)tracksWithMediaType:(AVMediaType)mediaType;
/*!
@method tracksWithMediaCharacteristic:
@abstract Provides an array of AVAssetTracks of the asset that present media with the specified characteristic.
@param mediaCharacteristic
The media characteristic according to which AVAsset filters its AVAssetTracks. (Media characteristics are defined in AVMediaFormat.h.)
@result An NSArray of AVAssetTracks; may be empty if no tracks with the specified characteristic are available.
@discussion Becomes callable without blocking when the key @"tracks" has been loaded
*/
- (NSArray<AVAssetTrack *> *)tracksWithMediaCharacteristic:(AVMediaCharacteristic)mediaCharacteristic;
/*!
@property trackGroups
@abstract
All track groups in the receiver.
@discussion
The value of this property is an NSArray of AVAssetTrackGroups, each representing a different grouping of tracks in the receiver.
*/
@property (nonatomic, readonly) NSArray<AVAssetTrackGroup *> *trackGroups NS_AVAILABLE(10_9, 7_0);
@end
@interface AVAsset (AVAssetMetadataReading)
// high-level access to selected metadata of common interest
/* Indicates the creation date of the asset as an AVMetadataItem. May be nil. If a creation date has been stored by the asset in a form that can be converted to an NSDate, the dateValue property of the AVMetadataItem will provide an instance of NSDate. Otherwise the creation date is available only as a string value, via -[AVMetadataItem stringValue].
*/
@property (nonatomic, readonly, nullable) AVMetadataItem *creationDate NS_AVAILABLE(10_8, 5_0);
/* Provides access to the lyrics of the asset suitable for the current locale.
*/
@property (nonatomic, readonly, nullable) NSString *lyrics;
/* Provides access to an array of AVMetadataItems for each common metadata key for which a value is available; items can be filtered according to language via +[AVMetadataItem metadataItemsFromArray:filteredAndSortedAccordingToPreferredLanguages:] and according to identifier via +[AVMetadataItem metadataItemsFromArray:filteredByIdentifier:].
*/
@property (nonatomic, readonly) NSArray<AVMetadataItem *> *commonMetadata;
/* Provides access to an array of AVMetadataItems for all metadata identifiers for which a value is available; items can be filtered according to language via +[AVMetadataItem metadataItemsFromArray:filteredAndSortedAccordingToPreferredLanguages:] and according to identifier via +[AVMetadataItem metadataItemsFromArray:filteredByIdentifier:].
*/
@property (nonatomic, readonly) NSArray<AVMetadataItem *> *metadata NS_AVAILABLE(10_10, 8_0);
/* Provides an NSArray of NSStrings, each representing a metadata format that's available to the asset (e.g. ID3, iTunes metadata, etc.). Metadata formats are defined in AVMetadataFormat.h.
*/
@property (nonatomic, readonly) NSArray<AVMetadataFormat> *availableMetadataFormats;
/*!
@method metadataForFormat:
@abstract Provides an NSArray of AVMetadataItems, one for each metadata item in the container of the specified format; can subsequently be filtered according to language via +[AVMetadataItem metadataItemsFromArray:filteredAndSortedAccordingToPreferredLanguages:], according to locale via +[AVMetadataItem metadataItemsFromArray:withLocale:], or according to key via +[AVMetadataItem metadataItemsFromArray:withKey:keySpace:].
@param format
The metadata format for which items are requested.
@result An NSArray containing AVMetadataItems; may be empty if there is no metadata of the specified format.
@discussion Becomes callable without blocking when the key @"availableMetadataFormats" has been loaded
*/
- (NSArray<AVMetadataItem *> *)metadataForFormat:(AVMetadataFormat)format;
@end
@class AVTimedMetadataGroup;
@interface AVAsset (AVAssetChapterInspection)
/* array of NSLocale
*/
@property (readonly) NSArray<NSLocale *> *availableChapterLocales NS_AVAILABLE(10_7, 4_3);
/*!
@method chapterMetadataGroupsWithTitleLocale:containingMetadataItemsWithCommonKeys:
@abstract Provides an array of chapters.
@param locale
Locale of the metadata items carrying chapter titles to be returned (supports the IETF BCP 47 specification).
@param commonKeys
Array of common keys of AVMetadataItem to be included; can be nil.
AVMetadataCommonKeyArtwork is the only supported key for now.
@result An NSArray of AVTimedMetadataGroup.
@discussion
This method returns an array of AVTimedMetadataGroup objects. Each object in the array always contains an AVMetadataItem representing the chapter title; the timeRange property of the AVTimedMetadataGroup object is equal to the time range of the chapter title item.
An AVMetadataItem with the specified common key will be added to an existing AVTimedMetadataGroup object if the time range (timestamp and duration) of the metadata item and the metadata group overlaps. The locale of items not carrying chapter titles need not match the specified locale parameter.
Further filtering of the metadata items in AVTimedMetadataGroups according to language can be accomplished using +[AVMetadataItem metadataItemsFromArray:filteredAndSortedAccordingToPreferredLanguages:]; filtering of the metadata items according to locale can be accomplished using +[AVMetadataItem metadataItemsFromArray:withLocale:].
*/
- (NSArray<AVTimedMetadataGroup *> *)chapterMetadataGroupsWithTitleLocale:(NSLocale *)locale containingItemsWithCommonKeys:(nullable NSArray<AVMetadataKey> *)commonKeys NS_AVAILABLE(10_7, 4_3);
/*!
@method chapterMetadataGroupsBestMatchingPreferredLanguages:
@abstract Tests, in order of preference, for a match between language identifiers in the specified array of preferred languages and the available chapter locales, and returns the array of chapters corresponding to the first match that's found.
@param preferredLanguages
An array of language identifiers in order of preference, each of which is an IETF BCP 47 (RFC 4646) language identifier. Use +[NSLocale preferredLanguages] to obtain the user's list of preferred languages.
@result An NSArray of AVTimedMetadataGroup.
@discussion
Safe to call without blocking when the AVAsset key availableChapterLocales has status AVKeyValueStatusLoaded.
Returns an array of AVTimedMetadataGroup objects. Each object in the array always contains an AVMetadataItem representing the chapter title; the timeRange property of the AVTimedMetadataGroup object is equal to the time range of the chapter title item.
All of the available chapter metadata is included in the metadata groups, including items with the common key AVMetadataCommonKeyArtwork, if such items are present. Items not carrying chapter titles will be added to an existing AVTimedMetadataGroup object if the time range (timestamp and duration) of the metadata item and that of the metadata group overlaps. The locale of such items need not match the locale of the chapter titles.
Further filtering of the metadata items in AVTimedMetadataGroups according to language can be accomplished using +[AVMetadataItem metadataItemsFromArray:filteredAndSortedAccordingToPreferredLanguages:]; filtering of the metadata items according to locale can be accomplished using +[AVMetadataItem metadataItemsFromArray:withLocale:].
*/
- (NSArray<AVTimedMetadataGroup *> *)chapterMetadataGroupsBestMatchingPreferredLanguages:(NSArray<NSString *> *)preferredLanguages NS_AVAILABLE(10_8, 6_0);
@end
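/* Usage sketch (illustrative): fetch chapters for the user's preferred
   languages, as the discussion above recommends; safe to call without blocking
   once availableChapterLocales has loaded. */
#import <AVFoundation/AVFoundation.h>

static NSArray<AVTimedMetadataGroup *> *chaptersForUser(AVAsset *asset) {
    return [asset chapterMetadataGroupsBestMatchingPreferredLanguages:
                      [NSLocale preferredLanguages]];
}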
@class AVMediaSelectionGroup;
@interface AVAsset (AVAssetMediaSelection)
/* Provides an NSArray of NSStrings, each NSString indicating a media characteristic for which a media selection option is available.
*/
@property (nonatomic, readonly) NSArray<AVMediaCharacteristic> *availableMediaCharacteristicsWithMediaSelectionOptions NS_AVAILABLE(10_8, 5_0);
/*!
@method mediaSelectionGroupForMediaCharacteristic:
@abstract Provides an instance of AVMediaSelectionGroup that contains one or more options with the specified media characteristic.
@param mediaCharacteristic
A media characteristic for which you wish to obtain the available media selection options. AVMediaCharacteristicAudible, AVMediaCharacteristicLegible, and AVMediaCharacteristicVisual are currently supported.
Pass AVMediaCharacteristicAudible to obtain the group of available options for audio media in various languages and for various purposes, such as descriptive audio.
Pass AVMediaCharacteristicLegible to obtain the group of available options for subtitles in various languages and for various purposes.
Pass AVMediaCharacteristicVisual to obtain the group of available options for video media.
@result An instance of AVMediaSelectionGroup. May be nil.
@discussion
Becomes callable without blocking when the key @"availableMediaCharacteristicsWithMediaSelectionOptions" has been loaded.
If the asset has no AVMediaSelectionGroup containing options with the specified media characteristic, the return value will be nil.
Filtering of the options in the returned AVMediaSelectionGroup according to playability, locale, and additional media characteristics can be accomplished using the category AVMediaSelectionOptionFiltering defined on AVMediaSelectionGroup.
*/
- (nullable AVMediaSelectionGroup *)mediaSelectionGroupForMediaCharacteristic:(AVMediaCharacteristic)mediaCharacteristic NS_AVAILABLE(10_8, 5_0);
/*!
@property preferredMediaSelection
@abstract Provides an instance of AVMediaSelection with default selections for each of the receiver's media selection groups.
*/
@property (nonatomic, readonly) AVMediaSelection *preferredMediaSelection NS_AVAILABLE(10_11, 9_0);
/*!
@property allMediaSelections
@abstract Provides an array of all permutations of AVMediaSelection for this asset.
@discussion
Becomes callable without blocking when the key @"availableMediaCharacteristicsWithMediaSelectionOptions" has been loaded.
*/
@property (nonatomic, readonly) NSArray <AVMediaSelection *> *allMediaSelections NS_AVAILABLE(10_13, 11_0);
@end
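/* Usage sketch (illustrative): enumerate the legible (subtitle) options, per
   the media selection API above. The group may be nil if no such options exist. */
#import <AVFoundation/AVFoundation.h>

static void logSubtitleOptions(AVAsset *asset) {
    AVMediaSelectionGroup *group =
        [asset mediaSelectionGroupForMediaCharacteristic:AVMediaCharacteristicLegible];
    for (AVMediaSelectionOption *option in group.options) { // nil group: no iterations
        NSLog(@"subtitle option: %@", option.displayName);
    }
}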
@interface AVAsset (AVAssetProtectedContent)
/*!
@property hasProtectedContent
@abstract Indicates whether or not the asset has protected content.
@discussion Assets containing protected content may not be playable without successful authorization, even if the value of the "playable" property is YES. See the properties in the AVAssetUsability category for details on how such an asset may be used. On OS X, clients can use the interfaces in AVPlayerItemProtectedContentAdditions.h to request authorization to play the asset.
*/
@property (nonatomic, readonly) BOOL hasProtectedContent NS_AVAILABLE(10_7, 4_2);
@end
@interface AVAsset (AVAssetFragments)
/*!
@property canContainFragments
@abstract Indicates whether the asset is capable of being extended by fragments.
@discussion For QuickTime movie files and MPEG-4 files, the value of canContainFragments is YES if an 'mvex' box is present in the 'moov' box. For those types, the 'mvex' box signals the possible presence of later 'moof' boxes.
*/
@property (nonatomic, readonly) BOOL canContainFragments NS_AVAILABLE(10_11, 9_0);
/*!
@property containsFragments
@abstract Indicates whether the asset is extended by at least one fragment.
@discussion For QuickTime movie files and MPEG-4 files, the value of this property is YES if canContainFragments is YES and at least one 'moof' box is present after the 'moov' box.
*/
@property (nonatomic, readonly) BOOL containsFragments NS_AVAILABLE(10_11, 9_0);
/*!
@property overallDurationHint
@abstract Indicates the total duration of fragments that either exist now or may be appended in the future in order to extend the duration of the asset.
@discussion For QuickTime movie files and MPEG-4 files, the value of this property is obtained from the 'mehd' box of the 'mvex' box, if present. If no total fragment duration hint is available, the value of this property is kCMTimeInvalid.
*/
@property (nonatomic, readonly) CMTime overallDurationHint NS_AVAILABLE(10_12_2, 10_2);
@end
@interface AVAsset (AVAssetUsability)
/*!
@property playable
@abstract Indicates whether an AVPlayer can play the contents of the asset in a manner that meets user expectations.
@discussion A client can attempt playback when playable is NO; however, this may lead to a substandard playback experience.
*/
@property (nonatomic, readonly, getter=isPlayable) BOOL playable NS_AVAILABLE(10_7, 4_3);
/* indicates whether an AVAssetExportSession can be used with the receiver for export
*/
@property (nonatomic, readonly, getter=isExportable) BOOL exportable NS_AVAILABLE(10_7, 4_3);
/* indicates whether an AVAssetReader can be used with the receiver for extracting media data
*/
@property (nonatomic, readonly, getter=isReadable) BOOL readable NS_AVAILABLE(10_7, 4_3);
/* indicates whether the receiver can be used to build an AVMutableComposition
*/
@property (nonatomic, readonly, getter=isComposable) BOOL composable NS_AVAILABLE(10_7, 4_3);
#if TARGET_OS_IPHONE
/* indicates whether the receiver can be written to the saved photos album
*/
@property (nonatomic, readonly, getter=isCompatibleWithSavedPhotosAlbum) BOOL compatibleWithSavedPhotosAlbum NS_AVAILABLE_IOS(5_0);
#endif // TARGET_OS_IPHONE
/*!
@property compatibleWithAirPlayVideo
@abstract Indicates whether the asset is compatible with AirPlay Video.
@discussion YES if an AVPlayerItem initialized with the receiver can be played by an external device via AirPlay Video.
*/
@property (nonatomic, readonly, getter=isCompatibleWithAirPlayVideo) BOOL compatibleWithAirPlayVideo NS_AVAILABLE(10_11, 9_0);
@end
#pragma mark --- AVURLAsset ---
// Keys for options dictionary for use with -[AVURLAsset initWithURL:options:]
/*!
@constant AVURLAssetPreferPreciseDurationAndTimingKey
@abstract
Indicates whether the asset should be prepared to indicate a precise duration and provide precise random access by time.
The value for this key is a boolean NSNumber.
@discussion
If nil is passed as the value of the options parameter to -[AVURLAsset initWithURL:options:], or if a dictionary that lacks a value for the key AVURLAssetPreferPreciseDurationAndTimingKey is passed instead, a default value of NO is assumed. If the asset is intended to be played only, because AVPlayer will support approximate random access by time when full precision isn't available, the default value of NO will suffice.
Pass YES if longer loading times are acceptable in cases in which precise timing is required. If the asset is intended to be inserted into an AVMutableComposition, precise random access is typically desirable and the value of YES is recommended.
Note that such precision may require additional parsing of the resource in advance of operations that make use of any portion of it, depending on the specifics of its container format. Many container formats provide sufficient summary information for precise timing and do not require additional parsing to prepare for it; QuickTime movie files and MPEG-4 files are examples of such formats. Other formats do not provide sufficient summary information, and precise random access for them is possible only after a preliminary examination of a file's contents.
If you pass YES for an asset that you intend to play via an instance of AVPlayerItem and you are prepared for playback to commence before the value of -[AVPlayerItem duration] becomes available, you can omit the key @"duration" from the array of AVAsset keys you pass to -[AVPlayerItem initWithAsset:automaticallyLoadedAssetKeys:] in order to prevent AVPlayerItem from automatically loading the value of duration while the item becomes ready to play.
If precise duration and timing is not possible for the timed media resource referenced by the asset's URL, AVAsset.providesPreciseDurationAndTiming will be NO even if precise timing is requested via the use of this key.
*/
AVF_EXPORT NSString *const AVURLAssetPreferPreciseDurationAndTimingKey NS_AVAILABLE(10_7, 4_0);
/*!
@constant AVURLAssetReferenceRestrictionsKey
@abstract
Indicates the restrictions used by the asset when resolving references to external media data. The value of this key is an NSNumber wrapping an AVAssetReferenceRestrictions enum value or the logical combination of multiple such values.
@discussion
Some assets can contain references to media data stored outside the asset's container file, for example in another file. This key can be used to specify a policy to use when these references are encountered. If an asset contains one or more references of a type that is forbidden by the reference restrictions, loading of asset properties will fail. In addition, such an asset cannot be used with other AVFoundation modules, such as AVPlayerItem or AVAssetExportSession.
*/
AVF_EXPORT NSString *const AVURLAssetReferenceRestrictionsKey NS_AVAILABLE(10_7, 5_0);
/*!
@constant AVURLAssetHTTPCookiesKey
@abstract
HTTP cookies that the AVURLAsset may send with HTTP requests
Standard cross-site policy still applies: cookies will only be sent to domains to which they apply.
@discussion
By default, an AVURLAsset will only have access to cookies in the client's default cookie storage
that apply to the AVURLAsset's URL. You can supplement the cookies available to the asset
via use of this initialization option.
HTTP cookies do not apply to non-HTTP(S) URLs.
In HLS, many HTTP requests (e.g., media, crypt key, variant index) might be issued to different paths or hosts.
In both of these cases, HTTP requests will be missing any cookies that do not apply to the AVURLAsset's URL.
This init option allows the AVURLAsset to use additional HTTP cookies for those HTTP(S) requests.
*/
AVF_EXPORT NSString *const AVURLAssetHTTPCookiesKey NS_AVAILABLE_IOS(8_0);
/*
@constant AVURLAssetAllowsCellularAccessKey
@abstract Indicates whether network requests on behalf of this asset are allowed to use the cellular interface.
@discussion
Default is YES.
*/
AVF_EXPORT NSString *const AVURLAssetAllowsCellularAccessKey NS_AVAILABLE_IOS(10_0);
/*!
@class AVURLAsset
@abstract AVURLAsset provides access to the AVAsset model for timed audiovisual media referenced by URL.
@discussion
Note that although instances of AVURLAsset are immutable, values for its keys may not be immediately available without blocking. See the discussion of the class AVAsset above regarding the availability of values for keys and the use of AVAsynchronousKeyValueLoading.
Once an AVURLAsset's value for a key is available, it will not change. AVPlayerItem provides access to information that can change dynamically during playback; see AVPlayerItem.duration and AVPlayerItem.tracks.
AVURLAssets can be initialized with NSURLs that refer to audiovisual media resources, such as streams (including HTTP live streams), QuickTime movie files, MP3 files, and files of other types.
*/
@class AVURLAssetInternal;
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVURLAsset : AVAsset
{
@private
AVURLAssetInternal *_URLAsset;
}
AV_INIT_UNAVAILABLE
/*!
@method audiovisualTypes
@abstract Provides the file types the AVURLAsset class understands.
@result An NSArray of UTIs identifying the file types the AVURLAsset class understands.
*/
+ (NSArray<AVFileType> *)audiovisualTypes NS_AVAILABLE(10_7, 5_0);
/*!
@method audiovisualMIMETypes
@abstract Provides the MIME types the AVURLAsset class understands.
@result An NSArray of NSStrings containing MIME types the AVURLAsset class understands.
*/
+ (NSArray<NSString *> *)audiovisualMIMETypes NS_AVAILABLE(10_7, 5_0);
/*!
@method isPlayableExtendedMIMEType:
@abstract Returns YES if asset is playable with the codec(s) and container type specified in extendedMIMEType. Returns NO otherwise.
@param extendedMIMEType
@result YES or NO.
*/
+ (BOOL)isPlayableExtendedMIMEType: (NSString *)extendedMIMEType NS_AVAILABLE(10_7, 5_0);
/*!
@method URLAssetWithURL:options:
@abstract Returns an instance of AVURLAsset for inspection of a media resource.
@param URL
An instance of NSURL that references a media resource.
@param options
An instance of NSDictionary that contains keys for specifying options for the initialization of the AVURLAsset. See AVURLAssetPreferPreciseDurationAndTimingKey and AVURLAssetReferenceRestrictionsKey above.
@result An instance of AVURLAsset.
*/
+ (instancetype)URLAssetWithURL:(NSURL *)URL options:(nullable NSDictionary<NSString *, id> *)options;
/*!
@method initWithURL:options:
@abstract Initializes an instance of AVURLAsset for inspection of a media resource.
@param URL
An instance of NSURL that references a media resource.
@param options
An instance of NSDictionary that contains keys for specifying options for the initialization of the AVURLAsset. See AVURLAssetPreferPreciseDurationAndTimingKey and AVURLAssetReferenceRestrictionsKey above.
@result An instance of AVURLAsset.
*/
- (instancetype)initWithURL:(NSURL *)URL options:(nullable NSDictionary<NSString *, id> *)options NS_DESIGNATED_INITIALIZER;
/* indicates the URL with which the instance of AVURLAsset was initialized
*/
@property (nonatomic, readonly, copy) NSURL *URL;
@end
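/* Usage sketch (illustrative): initializing an AVURLAsset with the option keys
   discussed above. Precise timing is requested here on the assumption that the
   asset is destined for composition/editing. */
#import <AVFoundation/AVFoundation.h>

static AVURLAsset *assetForEditing(NSURL *url) {
    NSDictionary<NSString *, id> *options = @{
        AVURLAssetPreferPreciseDurationAndTimingKey : @YES,
        AVURLAssetReferenceRestrictionsKey : @(AVAssetReferenceRestrictionForbidAll)
    };
    return [AVURLAsset URLAssetWithURL:url options:options];
}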
@class AVAssetResourceLoader;
@interface AVURLAsset (AVURLAssetURLHandling)
/*!
@property resourceLoader
@abstract
Provides access to an instance of AVAssetResourceLoader, which offers limited control over the handling of URLs that may be loaded in the course of performing operations on the asset, such as playback.
The loading of file URLs cannot be mediated via use of AVAssetResourceLoader.
Note that copies of an AVAsset will vend the same instance of AVAssetResourceLoader.
*/
@property (nonatomic, readonly) AVAssetResourceLoader *resourceLoader NS_AVAILABLE(10_9, 6_0);
@end
@class AVAssetCache;
@interface AVURLAsset (AVURLAssetCache)
/*!
@property assetCache
@abstract Provides access to an instance of AVAssetCache to use for inspection of locally cached media data. Will be nil if an asset has not been configured to store or access media data from disk.
*/
@property (nonatomic, readonly, nullable) AVAssetCache *assetCache NS_AVAILABLE(10_12, 10_0);
@end
@interface AVURLAsset (AVAssetCompositionUtility)
/*!
@method compatibleTrackForCompositionTrack:
@abstract Provides a reference to an AVAssetTrack of the target from which any timeRange
can be inserted into a mutable composition track (via -[AVMutableCompositionTrack insertTimeRange:ofTrack:atTime:error:]).
@param compositionTrack
The composition track for which a compatible AVAssetTrack is requested.
@result an instance of AVAssetTrack
@discussion
Finds a track of the target with content that can be accommodated by the specified composition track.
The logical complement of -[AVMutableComposition mutableTrackCompatibleWithTrack:].
*/
- (nullable AVAssetTrack *)compatibleTrackForCompositionTrack:(AVCompositionTrack *)compositionTrack;
@end
#pragma mark --- AVAsset change notifications ---
/*
AVAsset change notifications are posted by instances of mutable subclasses, AVMutableComposition and AVMutableMovie.
Some of the notifications are also posted by instances of dynamic subclasses, AVFragmentedAsset and AVFragmentedMovie, but these are capable of changing only in well-defined ways and only under specific conditions that you control.
*/
/*!
@constant AVAssetDurationDidChangeNotification
@abstract Posted when the duration of an AVFragmentedAsset changes while it's being minded by an AVFragmentedAssetMinder, but only for changes that occur after the status of the value of @"duration" has reached AVKeyValueStatusLoaded.
*/
AVF_EXPORT NSString *const AVAssetDurationDidChangeNotification NS_AVAILABLE(10_11, 9_0);
/*!
@constant AVAssetContainsFragmentsDidChangeNotification
@abstract Posted after the value of @"containsFragments" has already been loaded and the AVFragmentedAsset is added to an AVFragmentedAssetMinder, either when 1) fragments are detected in the asset on disk after it had previously contained none or when 2) no fragments are detected in the asset on disk after it had previously contained one or more.
*/
AVF_EXPORT NSString *const AVAssetContainsFragmentsDidChangeNotification NS_AVAILABLE_MAC(10_11);
/*!
@constant AVAssetWasDefragmentedNotification
@abstract Posted when the asset on disk is defragmented while an AVFragmentedAsset is being minded by an AVFragmentedAssetMinder, but only if the defragmentation occurs after the status of the value of @"canContainFragments" has reached AVKeyValueStatusLoaded.
@discussion After this notification is posted, the value of the asset properties canContainFragments and containsFragments will both be NO.
*/
AVF_EXPORT NSString *const AVAssetWasDefragmentedNotification NS_AVAILABLE_MAC(10_11);
/*!
@constant AVAssetChapterMetadataGroupsDidChangeNotification
@abstract Posted when the collection of arrays of timed metadata groups representing chapters of an AVAsset change and when any of the contents of the timed metadata groups change, but only for changes that occur after the status of the value of @"availableChapterLocales" has reached AVKeyValueStatusLoaded.
*/
AVF_EXPORT NSString *const AVAssetChapterMetadataGroupsDidChangeNotification NS_AVAILABLE(10_11, 9_0);
/*!
@constant AVAssetMediaSelectionGroupsDidChangeNotification
@abstract Posted when the collection of media selection groups provided by an AVAsset changes and when any of the contents of its media selection groups change, but only for changes that occur after the status of the value of @"availableMediaCharacteristicsWithMediaSelectionOptions" has reached AVKeyValueStatusLoaded.
*/
AVF_EXPORT NSString *const AVAssetMediaSelectionGroupsDidChangeNotification NS_AVAILABLE(10_11, 9_0);
#pragma mark --- AVFragmentedAsset ---
/*!
@class AVFragmentedAsset
@abstract A subclass of AVURLAsset that represents media resources that can be extended in total duration without modifying previously existing data structures.
Such media resources include QuickTime movie files and MPEG-4 files that indicate, via an 'mvex' box in their 'moov' box, that they accommodate additional fragments. Media resources of other types may also be supported. To check whether a given instance of AVFragmentedAsset can be used to monitor the addition of fragments, check the value of the AVURLAsset property canContainFragments.
An AVFragmentedAsset is capable of changing the values of certain of its properties and those of its tracks, while an operation that appends fragments to the underlying media resource is in progress, if the AVFragmentedAsset is associated with an instance of AVFragmentedAssetMinder.
@discussion While associated with an AVFragmentedAssetMinder, AVFragmentedAsset posts AVAssetDurationDidChangeNotification whenever new fragments are detected, as appropriate. It may also post AVAssetContainsFragmentsDidChangeNotification and AVAssetWasDefragmentedNotification, as discussed in documentation of those notifications.
*/
@protocol AVFragmentMinding
/*!
@property associatedWithFragmentMinder
@abstract Indicates whether an AVAsset that supports fragment minding is currently associated with a fragment minder, e.g. an instance of AVFragmentedAssetMinder.
@discussion AVAssets that support fragment minding post change notifications only while associated with a fragment minder.
*/
@property (nonatomic, readonly, getter=isAssociatedWithFragmentMinder) BOOL associatedWithFragmentMinder NS_AVAILABLE_MAC(10_11);
@end
@class AVFragmentedAssetInternal;
NS_CLASS_AVAILABLE_MAC(10_11)
@interface AVFragmentedAsset : AVURLAsset <AVFragmentMinding>
{
@private
AVFragmentedAssetInternal *_fragmentedAsset __attribute__((unused));
}
/*!
@method fragmentedAssetWithURL:options:
@abstract Returns an instance of AVFragmentedAsset for inspection of a fragmented media resource.
@param URL
An instance of NSURL that references a media resource.
@param options
An instance of NSDictionary that contains keys for specifying options for the initialization of the AVFragmentedAsset. See AVURLAssetPreferPreciseDurationAndTimingKey and AVURLAssetReferenceRestrictionsKey above.
@result An instance of AVFragmentedAsset.
*/
+ (instancetype)fragmentedAssetWithURL:(NSURL *)URL options:(nullable NSDictionary<NSString *, id> *)options;
/*!
@property tracks
@abstract The tracks in an asset.
@discussion The value of this property is an array of tracks the asset contains; the tracks are of type AVFragmentedAssetTrack.
*/
@property (nonatomic, readonly) NSArray<AVFragmentedAssetTrack *> *tracks;
@end
@interface AVFragmentedAsset (AVFragmentedAssetTrackInspection)
/*!
@method trackWithTrackID:
@abstract Provides an instance of AVFragmentedAssetTrack that represents the track of the specified trackID.
@param trackID
The trackID of the requested AVFragmentedAssetTrack.
@result An instance of AVFragmentedAssetTrack; may be nil if no track of the specified trackID is available.
@discussion Becomes callable without blocking when the key @"tracks" has been loaded
*/
- (nullable AVFragmentedAssetTrack *)trackWithTrackID:(CMPersistentTrackID)trackID;
/*!
@method tracksWithMediaType:
@abstract Provides an array of AVFragmentedAssetTracks of the asset that present media of the specified media type.
@param mediaType
The media type according to which the receiver filters its AVFragmentedAssetTracks. (Media types are defined in AVMediaFormat.h)
@result An NSArray of AVFragmentedAssetTracks; may be empty if no tracks of the specified media type are available.
@discussion Becomes callable without blocking when the key @"tracks" has been loaded
*/
- (NSArray<AVFragmentedAssetTrack *> *)tracksWithMediaType:(AVMediaType)mediaType;
/*!
@method tracksWithMediaCharacteristic:
@abstract Provides an array of AVFragmentedAssetTracks of the asset that present media with the specified characteristic.
@param mediaCharacteristic
The media characteristic according to which the receiver filters its AVFragmentedAssetTracks. (Media characteristics are defined in AVMediaFormat.h)
@result An NSArray of AVFragmentedAssetTracks; may be empty if no tracks with the specified characteristic are available.
@discussion Becomes callable without blocking when the key @"tracks" has been loaded
*/
- (NSArray<AVFragmentedAssetTrack *> *)tracksWithMediaCharacteristic:(AVMediaCharacteristic)mediaCharacteristic;
@end
#pragma mark --- AVFragmentedAssetMinder ---
/*!
@class AVFragmentedAssetMinder
@abstract A class that periodically checks whether additional fragments have been appended to fragmented assets.
*/
@class AVFragmentedAssetMinderInternal;
NS_CLASS_AVAILABLE_MAC(10_11)
@interface AVFragmentedAssetMinder : NSObject
{
@private
AVFragmentedAssetMinderInternal *_fragmentedAssetMinder;
}
/*!
@method fragmentedAssetMinderWithAsset:mindingInterval:
@abstract Creates an AVFragmentedAssetMinder, adds the specified asset to it, and sets the mindingInterval to the specified value.
@param asset
An instance of AVFragmentedAsset to add to the AVFragmentedAssetMinder
@param mindingInterval
The initial minding interval of the AVFragmentedAssetMinder.
@result A new instance of AVFragmentedAssetMinder.
*/
+ (instancetype)fragmentedAssetMinderWithAsset:(AVAsset<AVFragmentMinding> *)asset mindingInterval:(NSTimeInterval)mindingInterval;
/*!
@property mindingInterval
@abstract An NSTimeInterval indicating how often a check for additional fragments should be performed. The default interval is 10.0.
*/
@property (nonatomic) NSTimeInterval mindingInterval;
/*!
@property assets
@abstract An NSArray of the AVFragmentedAsset objects being minded.
*/
@property (nonatomic, readonly) NSArray<AVAsset<AVFragmentMinding> *> *assets;
/*!
@method addFragmentedAsset:
@abstract Adds a fragmented asset to the array of assets being minded.
@param asset
The fragmented asset to add to the minder.
*/
- (void)addFragmentedAsset:(AVAsset<AVFragmentMinding> *)asset;
/*!
@method removeFragmentedAsset:
@abstract Removes a fragmented asset from the array of assets being minded.
@param asset
The fragmented asset to remove from the minder.
*/
- (void)removeFragmentedAsset:(AVAsset<AVFragmentMinding> *)asset;
@end
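/* Usage sketch (illustrative, macOS only per the availability above): mind a
   growing fragmented asset and observe duration changes. */
#import <AVFoundation/AVFoundation.h>

static AVFragmentedAssetMinder *mindAsset(AVFragmentedAsset *asset) {
    [[NSNotificationCenter defaultCenter]
        addObserverForName:AVAssetDurationDidChangeNotification
                    object:asset
                     queue:[NSOperationQueue mainQueue]
                usingBlock:^(NSNotification *note) {
            NSLog(@"new duration: %.2fs", CMTimeGetSeconds(asset.duration));
        }];
    // Poll for newly appended fragments every 2 seconds instead of the default 10.
    return [AVFragmentedAssetMinder fragmentedAssetMinderWithAsset:asset
                                                   mindingInterval:2.0];
}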
@interface AVURLAsset (AVURLAssetContentKeyEligibility) <AVContentKeyRecipient>
/*!
@property mayRequireContentKeysForMediaDataProcessing
@abstract Allows AVURLAsset to be added as a content key recipient to an AVContentKeySession.
*/
@property (nonatomic, readonly) BOOL mayRequireContentKeysForMediaDataProcessing;
@end
NS_ASSUME_NONNULL_END


@@ -1,51 +0,0 @@
/*
File: AVAssetCache.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@class AVMediaSelectionGroup;
@class AVMediaSelectionOption;
/*!
@class AVAssetCache
@abstract
AVAssetCache is a class vended by an AVAsset used for the inspection of locally available media data.
@discussion
AVAssetCaches are vended by AVURLAsset's assetCache property.
*/
NS_CLASS_AVAILABLE(10_12, 10_0)
@interface AVAssetCache : NSObject
/*
@property playableOffline
@abstract
Returns YES if a complete rendition of an AVAsset is available to be played without a network connection.
@discussion
An answer of YES does not indicate that any given media selection is available for offline playback. To determine if a specific media selection is available offline, see mediaSelectionOptionsInMediaSelectionGroup:.
*/
@property (nonatomic, readonly, getter=isPlayableOffline) BOOL playableOffline;
/*
@method mediaSelectionOptionsInMediaSelectionGroup:
@abstract
Returns an array of AVMediaSelectionOptions in an AVMediaSelectionGroup that are available for offline operations, e.g. playback.
*/
- (NSArray<AVMediaSelectionOption *> *)mediaSelectionOptionsInMediaSelectionGroup:(AVMediaSelectionGroup *)mediaSelectionGroup;
AV_INIT_UNAVAILABLE
@end
NS_ASSUME_NONNULL_END
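/* Usage sketch (illustrative): checking offline playability via the cache
   vended by an AVURLAsset that was configured for media caching elsewhere. */
#import <AVFoundation/AVFoundation.h>

static BOOL canPlayOffline(AVURLAsset *asset) {
    AVAssetCache *cache = asset.assetCache; // nil unless configured for caching
    return cache != nil && cache.isPlayableOffline;
}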


@@ -1,358 +0,0 @@
/*
File: AVAssetExportSession.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVMediaFormat.h>
#import <AVFoundation/AVAudioProcessingSettings.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMTime.h>
#import <CoreMedia/CMTimeRange.h>
// for CGSize
#import <CoreGraphics/CoreGraphics.h>
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAssetExportSession
@abstract An AVAssetExportSession creates a new timed media resource from the contents of an
existing AVAsset in the form described by a specified export preset.
@discussion
Prior to initializing an instance of AVAssetExportSession, you can invoke
+allExportPresets to obtain the complete list of presets available. Use
+exportPresetsCompatibleWithAsset: to obtain a list of presets that are compatible
with a specific AVAsset.
To configure an export, initialize an AVAssetExportSession with an AVAsset that contains
the source media, an AVAssetExportPreset, the output file type, (a UTI string from
those defined in AVMediaFormat.h) and the output URL.
After configuration is complete, invoke exportAsynchronouslyWithCompletionHandler:
to start the export process. This method returns immediately; the export is performed
asynchronously. Invoke the -progress method to check on the progress. Note that in
some cases, depending on the capabilities of the device, when multiple exports are
attempted at the same time some may be queued until others have been completed. When
this happens, the status of a queued export will indicate that it's "waiting".
Whether the export fails, completes, or is cancelled, the completion handler you
supply to -exportAsynchronouslyWithCompletionHandler: will be called. Upon
completion, the status property indicates whether the export has completed
successfully. If it has failed, the value of the error property supplies additional
information about the reason for the failure.
*/
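/* Usage sketch (illustrative) of the configure-then-export flow described
   above; the output URL is an assumption, and the preset/file-type pairing
   must be compatible with the source asset. */
#import <AVFoundation/AVFoundation.h>

static void exportAsset(AVAsset *asset, NSURL *outputURL) {
    AVAssetExportSession *session =
        [AVAssetExportSession exportSessionWithAsset:asset
                                          presetName:AVAssetExportPresetMediumQuality];
    if (!session) return;
    session.outputURL = outputURL;
    session.outputFileType = AVFileTypeQuickTimeMovie;
    [session exportAsynchronouslyWithCompletionHandler:^{
        if (session.status == AVAssetExportSessionStatusCompleted) {
            NSLog(@"export finished");
        } else {
            NSLog(@"export status %ld, error: %@", (long)session.status, session.error);
        }
    }];
}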
// -- Export Preset Names --
/* These export options can be used to produce movie files with video size appropriate to the device.
The export will not scale the video up from a smaller size. The video will be compressed using
H.264 and the audio will be compressed using AAC. */
AVF_EXPORT NSString *const AVAssetExportPresetLowQuality NS_AVAILABLE(10_11, 4_0);
AVF_EXPORT NSString *const AVAssetExportPresetMediumQuality NS_AVAILABLE(10_11, 4_0);
AVF_EXPORT NSString *const AVAssetExportPresetHighestQuality NS_AVAILABLE(10_11, 4_0);
/* These export options can be used to produce movie files with video size appropriate to the device.
The export will not scale the video up from a smaller size. The video will be compressed using
HEVC and the audio will be compressed using AAC. */
AVF_EXPORT NSString *const AVAssetExportPresetHEVCHighestQuality NS_AVAILABLE(10_13, 11_0);
/* These export options can be used to produce movie files with the specified video size.
The export will not scale the video up from a smaller size. The video will be compressed using
H.264 and the audio will be compressed using AAC. Some devices cannot support some sizes. */
AVF_EXPORT NSString *const AVAssetExportPreset640x480 NS_AVAILABLE(10_7, 4_0);
AVF_EXPORT NSString *const AVAssetExportPreset960x540 NS_AVAILABLE(10_7, 4_0);
AVF_EXPORT NSString *const AVAssetExportPreset1280x720 NS_AVAILABLE(10_7, 4_0);
AVF_EXPORT NSString *const AVAssetExportPreset1920x1080 NS_AVAILABLE(10_7, 5_0);
AVF_EXPORT NSString *const AVAssetExportPreset3840x2160 NS_AVAILABLE(10_10, 9_0);
/* These export options can be used to produce movie files with the specified video size.
The export will not scale the video up from a smaller size. The video will be compressed using
HEVC and the audio will be compressed using AAC. Some devices cannot support some sizes. */
AVF_EXPORT NSString *const AVAssetExportPresetHEVC1920x1080 NS_AVAILABLE(10_13, 11_0);
AVF_EXPORT NSString *const AVAssetExportPresetHEVC3840x2160 NS_AVAILABLE(10_13, 11_0);
/* This export option will produce an audio-only .m4a file with appropriate iTunes gapless playback data */
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4A NS_AVAILABLE(10_7, 4_0);
/* This export option will cause the media of all tracks to be passed through to the output exactly as stored in the source asset, except for
tracks for which passthrough is not possible, usually because of constraints of the container format as indicated by the specified outputFileType.
This option is not included in the arrays returned by -allExportPresets and -exportPresetsCompatibleWithAsset. */
AVF_EXPORT NSString *const AVAssetExportPresetPassthrough NS_AVAILABLE(10_7, 4_0);
#if (TARGET_OS_MAC && !(TARGET_OS_EMBEDDED || TARGET_OS_IPHONE))
/* These export options are used to produce files that can be played on the specified Apple devices.
These presets are available for Desktop export only.
The files should have .m4v extensions (or .m4a for exports with audio only sources). */
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4VCellular NS_AVAILABLE(10_7, NA);
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4ViPod NS_AVAILABLE(10_7, NA);
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4V480pSD NS_AVAILABLE(10_7, NA);
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4VAppleTV NS_AVAILABLE(10_7, NA);
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4VWiFi NS_AVAILABLE(10_7, NA);
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4V720pHD NS_AVAILABLE(10_7, NA);
AVF_EXPORT NSString *const AVAssetExportPresetAppleM4V1080pHD NS_AVAILABLE(10_8, NA);
/* This export option will produce a QuickTime movie with Apple ProRes 422 video and LPCM audio. */
AVF_EXPORT NSString *const AVAssetExportPresetAppleProRes422LPCM NS_AVAILABLE(10_7, NA);
#endif // (TARGET_OS_MAC && !(TARGET_OS_EMBEDDED || TARGET_OS_IPHONE))
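/* Editor's sketch, not part of the original header: a client might check preset
   availability before configuring an export. `asset` is assumed to exist; the preset
   constants and +allExportPresets are declared in this header.

       NSArray<NSString *> *presets = [AVAssetExportSession allExportPresets];
       NSString *preset = AVAssetExportPresetHighestQuality;
       if ([presets containsObject:AVAssetExportPresetHEVCHighestQuality]) {
           preset = AVAssetExportPresetHEVCHighestQuality; // prefer HEVC where available
       }
*/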
@class AVAsset;
@class AVAssetExportSessionInternal;
@class AVAudioMix;
@class AVVideoComposition;
@class AVMetadataItemFilter;
@protocol AVVideoCompositing;
@class AVMetadataItem;
typedef NS_ENUM(NSInteger, AVAssetExportSessionStatus) {
AVAssetExportSessionStatusUnknown,
AVAssetExportSessionStatusWaiting,
AVAssetExportSessionStatusExporting,
AVAssetExportSessionStatusCompleted,
AVAssetExportSessionStatusFailed,
AVAssetExportSessionStatusCancelled
};
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAssetExportSession : NSObject
{
@private
AVAssetExportSessionInternal *_exportSession;
}
AV_INIT_UNAVAILABLE
/*!
@method exportSessionWithAsset:presetName:
@abstract Returns an instance of AVAssetExportSession for the specified source asset and preset.
@param asset An AVAsset object that is intended to be exported.
@param presetName An NSString specifying the name of the preset template for the export.
@result An instance of AVAssetExportSession.
@discussion If the specified asset belongs to a mutable subclass of AVAsset, AVMutableComposition or AVMutableMovie, the results of any export-related operation are undefined if you mutate the asset after the operation commences. These operations include but are not limited to: 1) testing the compatibility of export presets with the asset, 2) calculating the maximum duration or estimated length of the output file, and 3) the export operation itself.
*/
+ (nullable instancetype)exportSessionWithAsset:(AVAsset *)asset presetName:(NSString *)presetName NS_AVAILABLE(10_7, 4_1);
/*!
@method initWithAsset:presetName:
@abstract Initialize an AVAssetExportSession with the specified preset and set the source to the contents of the asset.
@param asset An AVAsset object that is intended to be exported.
@param presetName An NSString specifying the name of the preset template for the export.
@result Returns the initialized AVAssetExportSession.
@discussion If the specified asset belongs to a mutable subclass of AVAsset, AVMutableComposition or AVMutableMovie, the results of any export-related operation are undefined if you mutate the asset after the operation commences. These operations include but are not limited to: 1) testing the compatibility of export presets with the asset, 2) calculating the maximum duration or estimated length of the output file, and 3) the export operation itself.
*/
- (nullable instancetype)initWithAsset:(AVAsset *)asset presetName:(NSString *)presetName NS_DESIGNATED_INITIALIZER;
/* AVAssetExportSession properties are key-value observable unless documented otherwise */
/* Indicates the name of the preset with which the AVAssetExportSession was initialized */
@property (nonatomic, readonly) NSString *presetName;
/* Indicates the instance of AVAsset with which the AVAssetExportSession was initialized */
@property (nonatomic, retain, readonly) AVAsset *asset NS_AVAILABLE(10_8, 5_0);
/* Indicates the type of file to be written by the session.
The value of this property must be set before you invoke -exportAsynchronouslyWithCompletionHandler:; otherwise -exportAsynchronouslyWithCompletionHandler: will raise an NSInternalInconsistencyException.
Setting the value of this property to a file type that's not among the session's supported file types will result in an NSInvalidArgumentException. See supportedFileTypes. */
@property (nonatomic, copy, nullable) AVFileType outputFileType;
/* Indicates the URL of the export session's output. You may use UTTypeCopyPreferredTagWithClass(outputFileType, kUTTagClassFilenameExtension) to obtain an appropriate path extension for the outputFileType you have specified. For more information about UTTypeCopyPreferredTagWithClass and kUTTagClassFilenameExtension, on iOS see <MobileCoreServices/UTType.h> and on Mac OS X see <LaunchServices/UTType.h>. */
@property (nonatomic, copy, nullable) NSURL *outputURL;
/* indicates that the output file should be optimized for network use, e.g. that a QuickTime movie file should support "fast start" */
@property (nonatomic) BOOL shouldOptimizeForNetworkUse;
/* indicates the status of the export session */
@property (nonatomic, readonly) AVAssetExportSessionStatus status;
/* describes the error that occurred if the export status is AVAssetExportSessionStatusFailed */
@property (nonatomic, readonly, nullable) NSError *error;
/*!
@method exportAsynchronouslyWithCompletionHandler:
@abstract Starts the asynchronous execution of an export session.
@param handler
If internal preparation for export fails, the handler will be invoked synchronously.
The handler may also be called asynchronously after -exportAsynchronouslyWithCompletionHandler: returns,
in the following cases:
1) if a failure occurs during the export, including failures of loading, re-encoding, or writing media data to the output,
2) if -cancelExport is invoked,
3) if export session succeeds, having completely written its output to the outputURL.
In each case, AVAssetExportSession.status will signal the terminal state of the export session, and if a failure occurs, the NSError
that describes the failure can be obtained from the error property.
@discussion Initiates an asynchronous export operation and returns immediately.
*/
- (void)exportAsynchronouslyWithCompletionHandler:(void (^)(void))handler;
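/* Editor's sketch, not part of the original header: a typical asynchronous export.
   `asset` and `outputURL` are assumed to be supplied by the caller;
   AVFileTypeQuickTimeMovie is declared in AVMediaFormat.h.

       AVAssetExportSession *session =
           [AVAssetExportSession exportSessionWithAsset:asset
                                             presetName:AVAssetExportPresetMediumQuality];
       session.outputFileType = AVFileTypeQuickTimeMovie;
       session.outputURL = outputURL;
       [session exportAsynchronouslyWithCompletionHandler:^{
           if (session.status == AVAssetExportSessionStatusFailed) {
               NSLog(@"export failed: %@", session.error);
           }
       }];
*/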
/* Specifies the progress of the export on a scale from 0 to 1.0. A value of 0 means the export has not yet begun; a value of 1.0 means the export is complete. This property is not key-value observable. */
@property (nonatomic, readonly) float progress;
/*!
@method cancelExport
@abstract Cancels the execution of an export session.
@discussion Cancellation may be invoked while the export is running.
*/
- (void)cancelExport;
@end
@interface AVAssetExportSession (AVAssetExportSessionPresets)
/*!
@method allExportPresets
@abstract Returns all available export preset names.
@discussion Returns an array of NSStrings with the names of all available presets. Note that not all presets are
compatible with all AVAssets.
@result An NSArray containing an NSString for each of the available preset names.
*/
+ (NSArray<NSString *> *)allExportPresets;
/*!
@method exportPresetsCompatibleWithAsset:
@abstract Returns only the identifiers compatible with the given AVAsset object.
@discussion Not all export presets are compatible with all AVAssets. For example, a video-only asset is not compatible with an audio-only preset.
This method returns only the identifiers for presets that will be compatible with the given asset.
A client should pass in an AVAsset that is ready to be exported.
In order to ensure that the setup and running of an export operation will succeed using a given preset, no significant changes
(such as adding or deleting tracks) should be made to the asset between retrieving compatible identifiers and performing the export operation.
This method will access the tracks property of the AVAsset to build the returned NSArray. To avoid blocking the calling thread,
the tracks property should be loaded using the AVAsynchronousKeyValueLoading protocol before calling this method.
@param asset An AVAsset object that is intended to be exported.
@result An NSArray containing NSString values for the identifiers of compatible export types.
The array is a complete list of the valid identifiers that can be used as arguments to
initWithAsset:presetName: with the specified asset.
*/
+ (NSArray<NSString *> *)exportPresetsCompatibleWithAsset:(AVAsset *)asset;
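/* Editor's sketch, not part of the original header: loading the tracks key before
   querying compatibility, as recommended above. `asset` is assumed to exist;
   -loadValuesAsynchronouslyForKeys:completionHandler: comes from AVAsynchronousKeyValueLoading.

       [asset loadValuesAsynchronouslyForKeys:@[ @"tracks" ] completionHandler:^{
           NSArray<NSString *> *compatible =
               [AVAssetExportSession exportPresetsCompatibleWithAsset:asset];
           // choose a preset from `compatible` and create the session with it
       }];
*/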
/*!
@method determineCompatibilityOfExportPreset:withAsset:outputFileType:completionHandler:
@abstract Performs an inspection on the compatibility of an export preset, AVAsset and output file type. Calls the completion handler with YES if
the arguments are compatible; NO otherwise.
@discussion Not all export presets are compatible with all AVAssets and file types. This method can be used to query compatibility.
In order to ensure that the setup and running of an export operation will succeed using a given preset, no significant changes
(such as adding or deleting tracks) should be made to the asset between retrieving compatible identifiers and performing the export operation.
@param presetName An NSString specifying the name of the preset template for the export.
@param asset An AVAsset object that is intended to be exported.
@param outputFileType An AVFileType indicating a file type to check; or nil, to query whether there are any compatible types.
@param completionHandler A block called with the compatibility result.
*/
+ (void)determineCompatibilityOfExportPreset:(NSString *)presetName withAsset:(AVAsset *)asset outputFileType:(nullable AVFileType)outputFileType completionHandler:(void (^)(BOOL compatible))handler NS_AVAILABLE(10_9, 6_0);
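/* Editor's sketch, not part of the original header: checking one preset/asset/file-type
   combination before export. `asset` is assumed; AVFileTypeMPEG4 is declared in AVMediaFormat.h.

       [AVAssetExportSession determineCompatibilityOfExportPreset:AVAssetExportPresetHighestQuality
                                                        withAsset:asset
                                                   outputFileType:AVFileTypeMPEG4
                                                completionHandler:^(BOOL compatible) {
           if (!compatible) {
               // fall back to another preset or file type
           }
       }];
*/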
@end
@interface AVAssetExportSession (AVAssetExportSessionFileTypes)
/* Indicates the types of files the target can write, according to the preset the target was initialized with.
Does not perform an inspection of the AVAsset to determine whether its contents are compatible with the supported file types. If you need to make that determination before initiating the export, use -determineCompatibleFileTypesWithCompletionHandler:. */
@property (nonatomic, readonly) NSArray<AVFileType> *supportedFileTypes;
/*!
@method determineCompatibleFileTypesWithCompletionHandler:
@abstract Performs an inspection on the AVAsset and preset with which the receiver was initialized to determine a list of file types the export session can write.
@param handler
Called when the inspection completes with an array of file types the export session can write. Note that this array may have a count of zero.
@discussion This method differs from the supportedFileTypes property in that it performs an inspection of the AVAsset in order to determine its compatibility with each of the session's supported file types.
*/
- (void)determineCompatibleFileTypesWithCompletionHandler:(void (^)(NSArray<AVFileType> *compatibleFileTypes))handler NS_AVAILABLE(10_9, 6_0);
@end
@interface AVAssetExportSession (AVAssetExportSessionDurationAndLength)
/* Specifies a time range to be exported from the source. The default timeRange of an export session is kCMTimeZero..kCMTimePositiveInfinity, meaning that the full duration of the asset will be exported. */
@property (nonatomic) CMTimeRange timeRange;
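/* Editor's sketch, not part of the original header: exporting only the first ten seconds
   of the source. CMTimeRangeMake and CMTimeMakeWithSeconds are declared in CoreMedia;
   `session` is assumed to be a configured AVAssetExportSession.

       session.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMakeWithSeconds(10.0, 600));
*/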
#if TARGET_OS_IPHONE
/* Provides an estimate of the maximum duration of exported media that is possible given the source asset, the export preset, and the current value of fileLengthLimit. The export will not stop when it reaches this maximum duration; set the timeRange property to export only a certain time range. */
@property (nonatomic, readonly) CMTime maxDuration NS_AVAILABLE_IOS(4_0);
#endif // TARGET_OS_IPHONE
/* Indicates the estimated byte size of the exported file. Returns zero when the export preset is AVAssetExportPresetPassthrough or AVAssetExportPresetAppleProRes422LPCM. This property will also return zero if a numeric value (i.e. not invalid, indefinite, or infinite) for the timeRange property has not been set. */
@property (nonatomic, readonly) long long estimatedOutputFileLength NS_AVAILABLE(10_9, 5_0);
#if TARGET_OS_IPHONE
/* Indicates the file length that the output of the session should not exceed. Depending on the content of the source asset, it is possible for the output to slightly exceed the file length limit. The length of the output file should be tested if you require that a strict limit be observed before making use of the output. See also maxDuration and timeRange. */
@property (nonatomic) long long fileLengthLimit NS_AVAILABLE_IOS(4_0);
#endif // TARGET_OS_IPHONE
@end
@interface AVAssetExportSession (AVAssetExportSessionMetadata)
/* Specifies an NSArray of AVMetadataItems that are to be written to the output file by the export session.
If the value of this property is nil, any existing metadata in the exported asset will be translated as accurately as possible into
the appropriate metadata keyspace for the output file and written to the output. */
@property (nonatomic, copy, nullable) NSArray<AVMetadataItem *> *metadata;
/* Specifies a filter object to be used during export to determine which metadata items should be transferred from the source asset.
If the value of this property is nil, no filter will be applied. This is the default.
The filter will not be applied to metadata set via the metadata property. To apply the filter to metadata before it is set on the metadata property, see the methods in AVMetadataItem's AVMetadataItemArrayFiltering category.
@property (nonatomic, retain, nullable) AVMetadataItemFilter *metadataItemFilter NS_AVAILABLE(10_9, 7_0);
@end
@interface AVAssetExportSession (AVAssetExportSessionMediaProcessing)
/* Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h. An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
The default value is AVAudioTimePitchAlgorithmSpectral. */
@property (nonatomic, copy) AVAudioTimePitchAlgorithm audioTimePitchAlgorithm NS_AVAILABLE(10_9, 7_0);
/* Indicates whether non-default audio mixing is enabled for export and supplies the parameters for audio mixing. Ignored when export preset is AVAssetExportPresetPassthrough. */
@property (nonatomic, copy, nullable) AVAudioMix *audioMix;
/* Indicates whether video composition is enabled for export and supplies the instructions for video composition. Ignored when export preset is AVAssetExportPresetPassthrough. */
@property (nonatomic, copy, nullable) AVVideoComposition *videoComposition;
/* Indicates the custom video compositor instance used, if any */
@property (nonatomic, readonly, nullable) id <AVVideoCompositing> customVideoCompositor NS_AVAILABLE(10_9, 7_0);
@end
@interface AVAssetExportSession (AVAssetExportSessionMultipass)
/*!
@property canPerformMultiplePassesOverSourceMediaData
@abstract
Determines whether the export session can perform multiple passes over the source media to achieve better results.
@discussion
When the value for this property is YES, the export session can produce higher quality results at the expense of longer export times. Setting this property to YES may also require the export session to write temporary data to disk during the export. To control the location of temporary data, use the property directoryForTemporaryFiles.
The default value is NO. Not all export session configurations can benefit from performing multiple passes over the source media. In these cases, setting this property to YES has no effect.
This property cannot be set after the export has started.
*/
@property (nonatomic) BOOL canPerformMultiplePassesOverSourceMediaData NS_AVAILABLE(10_10, 8_0);
/*!
@property directoryForTemporaryFiles
@abstract
Specifies a directory that is suitable for containing temporary files generated during the export process
@discussion
AVAssetExportSession may need to write temporary files when configured in certain ways, such as when canPerformMultiplePassesOverSourceMediaData is set to YES. This property can be used to control where in the filesystem those temporary files are created. All temporary files will be deleted when the export is completed, is canceled, or fails.
When the value of this property is nil, the export session will choose a suitable location when writing temporary files. The default value is nil.
This property cannot be set after the export has started. The export will fail if the URL points to a location that is not a directory, does not exist, is not on the local file system, or if a file cannot be created in this directory (for example, due to insufficient permissions or sandboxing restrictions).
*/
@property (nonatomic, copy, nullable) NSURL *directoryForTemporaryFiles NS_AVAILABLE(10_10, 8_0);
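/* Editor's sketch, not part of the original header: opting into multiple passes and
   directing temporary data to a caller-supplied directory. `session` and `tempDirURL`
   are assumed; the URL must point to an existing local directory (see above).

       session.canPerformMultiplePassesOverSourceMediaData = YES;
       session.directoryForTemporaryFiles = tempDirURL;
*/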
@end
NS_ASSUME_NONNULL_END


@@ -1,191 +0,0 @@
/*
File: AVAssetImageGenerator.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
/*!
@class AVAssetImageGenerator
@abstract AVAssetImageGenerator provides thumbnail or preview images of assets independently of playback.
@discussion Generating a single image in isolation can require the decoding of a large number of video frames
with complex interdependencies. Whenever a series of images is required, far greater efficiency
can be achieved by use of the asynchronous method, -generateCGImagesAsynchronouslyForTimes:completionHandler:,
which employs decoding efficiencies similar to those used during playback.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMTime.h>
#import <CoreGraphics/CoreGraphics.h>
@class AVAsset;
@class AVVideoComposition;
@class AVAssetImageGeneratorInternal;
@protocol AVVideoCompositing;
NS_ASSUME_NONNULL_BEGIN
/*!
@typedef AVAssetImageGeneratorApertureMode
@abstract
The type of an aperture mode.
*/
typedef NSString * AVAssetImageGeneratorApertureMode NS_STRING_ENUM;
/*!
@constant AVAssetImageGeneratorApertureModeCleanAperture
@abstract Both pixel aspect ratio and clean aperture will be applied.
@discussion
An image's clean aperture is a region of video free from transition artifacts caused by the encoding of the signal.
*/
AVF_EXPORT AVAssetImageGeneratorApertureMode const AVAssetImageGeneratorApertureModeCleanAperture NS_AVAILABLE(10_7, 4_0);
/*!
@constant AVAssetImageGeneratorApertureModeProductionAperture
@abstract Only pixel aspect ratio will be applied.
@discussion
The image is not cropped to the clean aperture region, but it is scaled according to the pixel aspect ratio. Use this option when you want to see all the pixels in your video, including the edges.
*/
AVF_EXPORT AVAssetImageGeneratorApertureMode const AVAssetImageGeneratorApertureModeProductionAperture NS_AVAILABLE(10_7, 4_0);
/*!
@constant AVAssetImageGeneratorApertureModeEncodedPixels
@abstract Neither pixel aspect ratio nor clean aperture will be applied.
@discussion
The image is not cropped to the clean aperture region and is not scaled according to the pixel aspect ratio. The encoded dimensions of the image description are displayed.
*/
AVF_EXPORT AVAssetImageGeneratorApertureMode const AVAssetImageGeneratorApertureModeEncodedPixels NS_AVAILABLE(10_7, 4_0);
typedef NS_ENUM(NSInteger, AVAssetImageGeneratorResult)
{
AVAssetImageGeneratorSucceeded,
AVAssetImageGeneratorFailed,
AVAssetImageGeneratorCancelled,
};
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAssetImageGenerator : NSObject
{
@private
AVAssetImageGeneratorInternal *_priv;
}
AV_INIT_UNAVAILABLE
/* Indicates the instance of AVAsset with which the AVAssetImageGenerator was initialized */
@property (nonatomic, readonly) AVAsset *asset NS_AVAILABLE(10_9, 6_0);
/* Specifies whether or not to apply the track's preferredTransform (see -[AVAssetTrack preferredTransform]) when extracting an image from the asset.
Default is NO. Only rotation by 90, 180, or 270 degrees is supported. */
@property (nonatomic) BOOL appliesPreferredTrackTransform;
/* Specifies the maximum dimensions for generated image. Default (CGSizeZero) is the asset's unscaled dimensions.
AVAssetImageGenerator will scale images such that they fit within the defined bounding box.
Images will never be scaled up. The aspect ratio of the scaled image will be defined by the apertureMode property. */
@property (nonatomic) CGSize maximumSize;
/* Specifies the aperture mode for the generated image. Default is AVAssetImageGeneratorApertureModeCleanAperture. */
@property (nonatomic, copy, nullable) AVAssetImageGeneratorApertureMode apertureMode;
/* Specifies the video composition to use when extracting images from assets with multiple video tracks.
If no videoComposition is specified, only the first enabled video track will be used.
If a videoComposition is specified, the value of appliesPreferredTrackTransform is ignored. */
@property (nonatomic, copy, nullable) AVVideoComposition *videoComposition;
/* Indicates the custom video compositor instance used, if any */
@property (nonatomic, readonly, nullable) id <AVVideoCompositing> customVideoCompositor NS_AVAILABLE(10_9, 7_0);
/* The actual time of the generated images will be within the range [requestedTime-toleranceBefore, requestedTime+toleranceAfter] and may differ from the requested time for efficiency.
Pass kCMTimeZero for both toleranceBefore and toleranceAfter to request frame-accurate image generation; this may incur additional decoding delay.
Default is kCMTimePositiveInfinity. */
@property (nonatomic) CMTime requestedTimeToleranceBefore NS_AVAILABLE(10_7, 5_0);
@property (nonatomic) CMTime requestedTimeToleranceAfter NS_AVAILABLE(10_7, 5_0);
/*!
@method assetImageGeneratorWithAsset:
@abstract Returns an instance of AVAssetImageGenerator for use with the specified asset.
@param asset
The asset from which images will be extracted.
@result An instance of AVAssetImageGenerator
@discussion This method may succeed even if the asset possesses no visual tracks at the time of initialization.
Clients may wish to test whether an asset has any tracks with the visual characteristic via
-[AVAsset tracksWithMediaCharacteristic:].
Note also that assets that belong to a mutable subclass of AVAsset, AVMutableComposition or AVMutableMovie,
may gain visual tracks after initialization of an associated AVAssetImageGenerator.
However, the results of image generation are undefined if mutations of the asset occur while images
are being generated.
AVAssetImageGenerator will use the default enabled video track(s) to generate images.
*/
+ (instancetype)assetImageGeneratorWithAsset:(AVAsset *)asset;
/*!
@method initWithAsset:
@abstract Initializes an instance of AVAssetImageGenerator for use with the specified asset.
@param asset
The asset from which images will be extracted.
@result An instance of AVAssetImageGenerator
@discussion This method may succeed even if the asset possesses no visual tracks at the time of initialization.
Clients may wish to test whether an asset has any tracks with the visual characteristic via
-[AVAsset tracksWithMediaCharacteristic:].
Note also that assets that belong to a mutable subclass of AVAsset, AVMutableComposition or AVMutableMovie,
may gain visual tracks after initialization of an associated AVAssetImageGenerator.
However, the results of image generation are undefined if mutations of the asset occur while images
are being generated.
AVAssetImageGenerator will use the default enabled video track(s) to generate images.
*/
- (instancetype)initWithAsset:(AVAsset *)asset NS_DESIGNATED_INITIALIZER;
/*!
@method copyCGImageAtTime:actualTime:error:
@abstract Returns a CFRetained CGImageRef for an asset at or near the specified time.
@param requestedTime
The time at which the image of the asset is to be created.
@param actualTime
A pointer to a CMTime to receive the time at which the image was actually generated. If you are not interested
in this information, pass NULL.
@param outError
An error object describing the reason for failure, in the event that this method returns NULL.
@result A CGImageRef.
@discussion Returns the CGImage synchronously. Ownership follows the Create Rule.
*/
- (nullable CGImageRef)copyCGImageAtTime:(CMTime)requestedTime actualTime:(nullable CMTime *)actualTime error:(NSError * _Nullable * _Nullable)outError CF_RETURNS_RETAINED;
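/* Editor's sketch, not part of the original header: generating a single thumbnail
   synchronously. `asset` is assumed; per the Create Rule the caller releases the image.

       AVAssetImageGenerator *generator = [AVAssetImageGenerator assetImageGeneratorWithAsset:asset];
       generator.appliesPreferredTrackTransform = YES;
       NSError *error = nil;
       CMTime actualTime;
       CGImageRef image = [generator copyCGImageAtTime:CMTimeMakeWithSeconds(1.0, 600)
                                            actualTime:&actualTime
                                                 error:&error];
       if (image) {
           // use the image, then balance the copy
           CGImageRelease(image);
       }
*/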
/* error object indicates the reason for failure if the result is AVAssetImageGeneratorFailed */
typedef void (^AVAssetImageGeneratorCompletionHandler)(CMTime requestedTime, CGImageRef _Nullable image, CMTime actualTime, AVAssetImageGeneratorResult result, NSError * _Nullable error);
/*!
@method generateCGImagesAsynchronouslyForTimes:completionHandler:
@abstract Returns a series of CGImageRefs for an asset at or near the specified times.
@param requestedTimes
An NSArray of NSValues, each containing a CMTime, specifying the asset times at which an image is requested.
@param handler
A block that will be called when an image request is complete.
@discussion Employs an efficient "batch mode" for getting images in time order.
The client will receive exactly one handler callback for each requested time in requestedTimes.
Changes to generator properties (snap behavior, maximum size, etc.) will not affect outstanding asynchronous image generation requests.
The generated image is not retained. Clients should retain the image if they wish it to persist after the completion handler returns.
*/
- (void)generateCGImagesAsynchronouslyForTimes:(NSArray<NSValue *> *)requestedTimes completionHandler:(AVAssetImageGeneratorCompletionHandler)handler;
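/* Editor's sketch, not part of the original header: requesting several frame-accurate
   images in one batch. `generator` is assumed to be configured as above; +[NSValue
   valueWithCMTime:] comes from the AVFoundation NSValue additions in AVTime.h.

       generator.requestedTimeToleranceBefore = kCMTimeZero;
       generator.requestedTimeToleranceAfter = kCMTimeZero;
       NSArray<NSValue *> *times = @[ [NSValue valueWithCMTime:CMTimeMake(600, 600)],
                                      [NSValue valueWithCMTime:CMTimeMake(1200, 600)] ];
       [generator generateCGImagesAsynchronouslyForTimes:times
                                        completionHandler:^(CMTime requestedTime, CGImageRef image, CMTime actualTime, AVAssetImageGeneratorResult result, NSError *error) {
           if (result == AVAssetImageGeneratorSucceeded) {
               // retain the image here if it must outlive the handler
           }
       }];
*/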
/*!
@method cancelAllCGImageGeneration
@abstract Cancels all outstanding image generation requests.
@discussion Calls the handler block with AVAssetImageGeneratorCancelled for each image time in every previous invocation of -generateCGImagesAsynchronouslyForTimes:completionHandler:
for which images have not yet been supplied.
*/
- (void)cancelAllCGImageGeneration;
@end
NS_ASSUME_NONNULL_END


@@ -1,210 +0,0 @@
/*
File: AVAssetReader.h
Framework: AVFoundation
Copyright 2010-2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMTime.h>
#import <CoreMedia/CMTimeRange.h>
#import <CoreMedia/CMSampleBuffer.h>
@class AVAsset;
@class AVAssetReaderOutput;
@class AVAssetReaderInternal;
NS_ASSUME_NONNULL_BEGIN
/*!
@enum AVAssetReaderStatus
@abstract
These constants are returned by the AVAssetReader status property to indicate whether it can successfully read samples from its asset.
@constant AVAssetReaderStatusUnknown
Indicates that the status of the asset reader is not currently known.
@constant AVAssetReaderStatusReading
Indicates that the asset reader is successfully reading samples from its asset.
@constant AVAssetReaderStatusCompleted
Indicates that the asset reader has successfully read all of the samples in its time range.
@constant AVAssetReaderStatusFailed
Indicates that the asset reader can no longer read samples from its asset because of an error. The error is described by the value of the asset reader's error property.
@constant AVAssetReaderStatusCancelled
Indicates that the asset reader can no longer read samples because reading was canceled with the cancelReading method.
*/
typedef NS_ENUM(NSInteger, AVAssetReaderStatus) {
AVAssetReaderStatusUnknown = 0,
AVAssetReaderStatusReading,
AVAssetReaderStatusCompleted,
AVAssetReaderStatusFailed,
AVAssetReaderStatusCancelled,
};
/*!
@class AVAssetReader
@abstract
AVAssetReader provides services for obtaining media data from an asset.
@discussion
Instances of AVAssetReader read media data from an instance of AVAsset, whether the asset is file-based or represents an assembly of media data from multiple sources, as is the case with AVComposition.
Clients of AVAssetReader can read data from specific tracks of an asset and in specific formats by adding concrete instances of AVAssetReaderOutput to an AVAssetReader instance.
AVAssetReaderTrackOutput, a concrete subclass of AVAssetReaderOutput, can either read the track's media samples in the format in which they are stored by the asset or convert the media samples to a different format.
AVAssetReaderAudioMixOutput mixes multiple audio tracks of the asset after reading them, while AVAssetReaderVideoCompositionOutput composites multiple video tracks after reading them.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetReader : NSObject
{
@private
AVAssetReaderInternal *_priv;
}
AV_INIT_UNAVAILABLE
/*!
@method assetReaderWithAsset:error:
@abstract
Returns an instance of AVAssetReader for reading media data from the specified asset.
@param asset
The asset from which media data is to be read.
@param outError
On return, if initialization of the AVAssetReader fails, points to an NSError describing the nature of the failure.
@result An instance of AVAssetReader.
@discussion
If the specified asset belongs to a mutable subclass of AVAsset, AVMutableComposition or AVMutableMovie, the results of any asset reading operation are undefined if you mutate the asset after invoking -startReading.
*/
+ (nullable instancetype)assetReaderWithAsset:(AVAsset *)asset error:(NSError * _Nullable * _Nullable)outError;
/*!
@method initWithAsset:error:
@abstract
Creates an instance of AVAssetReader for reading media data from the specified asset.
@param asset
The asset from which media data is to be read.
@param outError
On return, if initialization of the AVAssetReader fails, points to an NSError describing the nature of the failure.
@result
An instance of AVAssetReader.
@discussion
If the specified asset belongs to a mutable subclass of AVAsset, AVMutableComposition or AVMutableMovie, the results of any asset reading operation are undefined if you mutate the asset after invoking -startReading.
*/
- (nullable instancetype)initWithAsset:(AVAsset *)asset error:(NSError * _Nullable * _Nullable)outError NS_DESIGNATED_INITIALIZER;
/*!
@property asset
@abstract
The asset from which the receiver's outputs read sample buffers.
@discussion
The value of this property is an AVAsset. Concrete instances of AVAssetReader that are created with specific AVAssetTrack instances must obtain those tracks from the asset returned by this property.
*/
@property (nonatomic, retain, readonly) AVAsset *asset;
/*!
@property status
@abstract
The status of reading sample buffers from the receiver's asset.
@discussion
The value of this property is an AVAssetReaderStatus that indicates whether reading is in progress, has completed successfully, has been canceled, or has failed. Clients of AVAssetReaderOutput objects should check the value of this property after -[AVAssetReaderOutput copyNextSampleBuffer] returns NULL to determine why no more samples could be read. This property is thread safe.
*/
@property (readonly) AVAssetReaderStatus status;
/*!
@property error
@abstract
If the receiver's status is AVAssetReaderStatusFailed, this describes the error that caused the failure.
@discussion
The value of this property is an NSError that describes what caused the receiver to no longer be able to read its asset. If the receiver's status is not AVAssetReaderStatusFailed, the value of this property is nil. This property is thread safe.
*/
@property (readonly, nullable) NSError *error;
/*!
@property timeRange
@abstract
Specifies a range of time that may limit the temporal portion of the receiver's asset from which media data will be read.
@discussion
The intersection of the value of timeRange and CMTimeRangeMake(kCMTimeZero, asset.duration) will determine the time range of the asset from which media data will be read. The default value of timeRange is CMTimeRangeMake(kCMTimeZero, kCMTimePositiveInfinity).
This property cannot be set after reading has started.
*/
@property (nonatomic) CMTimeRange timeRange;
/*!
@property outputs
@abstract
The outputs from which clients of receiver can read media data.
@discussion
The value of this property is an NSArray containing concrete instances of AVAssetReaderOutput. Outputs can be added to the receiver using the addOutput: method.
*/
@property (nonatomic, readonly) NSArray<AVAssetReaderOutput *> *outputs;
/*!
@method canAddOutput:
@abstract
Tests whether an output can be added to the receiver.
@param output
The AVAssetReaderOutput object to be tested.
@result
A BOOL indicating whether the output can be added to the receiver.
@discussion
An output that reads from a track of an asset other than the asset used to initialize the receiver cannot be added.
*/
- (BOOL)canAddOutput:(AVAssetReaderOutput *)output;
/*!
@method addOutput:
@abstract
Adds an output to the receiver.
@param output
The AVAssetReaderOutput object to be added.
@discussion
Outputs are created with a reference to one or more AVAssetTrack objects. These tracks must be owned by the asset returned by the receiver's asset property.
Outputs cannot be added after reading has started.
*/
- (void)addOutput:(AVAssetReaderOutput *)output;
/*!
@method startReading
@abstract
Prepares the receiver for reading sample buffers from the asset.
@result
A BOOL indicating whether reading could be started.
@discussion
This method validates the entire collection of settings for outputs for tracks, for audio mixing, and for video composition and initiates reading from the receiver's asset.
If this method returns NO, clients can determine the nature of the failure by checking the value of the status and error properties.
*/
- (BOOL)startReading;
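/* Editor's sketch, not part of the original header: the canonical read loop described
   above. `asset` and `track` are assumed; error handling is abbreviated.

       NSError *error = nil;
       AVAssetReader *reader = [AVAssetReader assetReaderWithAsset:asset error:&error];
       AVAssetReaderTrackOutput *output =
           [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track outputSettings:nil];
       if ([reader canAddOutput:output]) {
           [reader addOutput:output];
       }
       if ([reader startReading]) {
           CMSampleBufferRef buffer;
           while ((buffer = [output copyNextSampleBuffer]) != NULL) {
               // process the sample buffer
               CFRelease(buffer);
           }
           if (reader.status == AVAssetReaderStatusFailed) {
               NSLog(@"reading failed: %@", reader.error);
           }
       }
*/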
/*!
@method cancelReading
@abstract
Cancels any background work and prevents the receiver's outputs from reading more samples.
@discussion
Clients that want to stop reading samples from the receiver before reaching the end of its time range should call this method to stop any background read-ahead operations that may have been in progress.
This method should not be called concurrently with any calls to -[AVAssetReaderOutput copyNextSampleBuffer].
*/
- (void)cancelReading;
@end
NS_ASSUME_NONNULL_END


@@ -1,611 +0,0 @@
/*
File: AVAssetReaderOutput.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVVideoComposition.h>
#import <AVFoundation/AVAudioProcessingSettings.h>
#import <CoreMedia/CMTime.h>
#import <CoreMedia/CMSampleBuffer.h>
@class AVAssetTrack;
@class AVAudioMixInputParameters;
@class AVAudioMix;
@class AVVideoComposition;
@class AVAssetReaderOutputInternal;
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAssetReaderOutput
@abstract
AVAssetReaderOutput is an abstract class that defines an interface for reading a single collection of samples of a common media type from an AVAssetReader.
@discussion
Clients can read the media data of an asset by adding one or more concrete instances of AVAssetReaderOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
IMPORTANT PERFORMANCE NOTE: Make sure to set the alwaysCopiesSampleData property to NO if you do not need to modify the sample data in-place, to avoid unnecessary and inefficient copying.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetReaderOutput : NSObject
{
@private
AVAssetReaderOutputInternal *_internal;
}
/*!
@property mediaType
@abstract
The media type of the samples that can be read from the receiver.
@discussion
The value of this property is one of the media type strings defined in AVMediaFormat.h.
*/
@property (nonatomic, readonly) NSString *mediaType;
/*!
@property alwaysCopiesSampleData
@abstract
Indicates whether or not the data in buffers gets copied before being vended to the client.
@discussion
When the value of this property is YES, the AVAssetReaderOutput will always vend a buffer with copied data to the client. Data in such buffers can be freely modified by the client. When the value of this property is NO, the buffers vended to the client may not be copied. Such buffers may still be referenced by other entities. The result of modifying a buffer whose data hasn't been copied is undefined. Requesting buffers whose data hasn't been copied when possible can lead to performance improvements.
The default value is YES.
*/
@property (nonatomic) BOOL alwaysCopiesSampleData NS_AVAILABLE(10_8, 5_0);
/*!
@method copyNextSampleBuffer
@abstract
Copies the next sample buffer for the output synchronously.
@result
A CMSampleBuffer object referencing the output sample buffer.
@discussion
The client is responsible for calling CFRelease on the returned CMSampleBuffer object when finished with it. This method will return NULL if there are no more sample buffers available for the receiver within the time range specified by its AVAssetReader's timeRange property, or if there is an error that prevents the AVAssetReader from reading more media data. When this method returns NULL, clients should check the value of the associated AVAssetReader's status property to determine why no more samples could be read.
*/
- (nullable CMSampleBufferRef)copyNextSampleBuffer CF_RETURNS_RETAINED;
@end
@interface AVAssetReaderOutput (AVAssetReaderOutputRandomAccess)
/*!
@property supportsRandomAccess
@abstract
Indicates whether the asset reader output supports reconfiguration of the time ranges to read.
@discussion
When the value of this property is YES, the time ranges read by the asset reader output can be reconfigured during reading using the -resetForReadingTimeRanges: method. This also prevents the attached AVAssetReader from progressing to AVAssetReaderStatusCompleted until -markConfigurationAsFinal has been invoked.
The default value is NO, which means that the asset reader output may not be reconfigured once reading has begun. When the value of this property is NO, AVAssetReader may be able to read media data more efficiently, particularly when multiple asset reader outputs are attached.
This property may not be set after -startReading has been called on the attached asset reader.
*/
@property (nonatomic) BOOL supportsRandomAccess NS_AVAILABLE(10_10, 8_0);
/*!
@method resetForReadingTimeRanges:
@abstract
Starts reading over with a new set of time ranges.
@param timeRanges
An NSArray of NSValue objects, each representing a single CMTimeRange structure
@discussion
This method may only be used if supportsRandomAccess has been set to YES and may not be called after -markConfigurationAsFinal has been invoked.
This method is often used in conjunction with AVAssetWriter multi-pass (see AVAssetWriterInput category AVAssetWriterInputMultiPass). In this usage, the caller will invoke -copyNextSampleBuffer until that method returns NULL and then ask the AVAssetWriterInput for a set of time ranges from which it thinks media data should be re-encoded. These time ranges are then given to this method to set up the asset reader output for the next pass.
The time ranges set here override the time range set on AVAssetReader.timeRange. Just as with that property, for each time range in the array the intersection of that time range and CMTimeRangeMake(kCMTimeZero, asset.duration) will take effect. If the start times of each time range in the array are not strictly increasing or if two or more time ranges in the array overlap, an NSInvalidArgumentException will be raised. It is an error to include a time range with a non-numeric start time or duration (see CMTIME_IS_NUMERIC), unless the duration is kCMTimePositiveInfinity.
If this method is invoked after the status of the attached AVAssetReader has become AVAssetReaderStatusFailed or AVAssetReaderStatusCancelled, no change in status will occur and the result of the next call to -copyNextSampleBuffer will be NULL.
If this method is invoked before all media data has been read (i.e. -copyNextSampleBuffer has not yet returned NULL), an exception will be thrown. This method may not be called before -startReading has been invoked on the attached asset reader.
*/
- (void)resetForReadingTimeRanges:(NSArray<NSValue *> *)timeRanges NS_AVAILABLE(10_10, 8_0);
/*!
@method markConfigurationAsFinal
@abstract
Informs the receiver that no more reconfiguration of time ranges is necessary and allows the attached AVAssetReader to advance to AVAssetReaderStatusCompleted.
@discussion
When the value of supportsRandomAccess is YES, the attached asset reader will not advance to AVAssetReaderStatusCompleted until this method is called.
When the destination of media data vended by the receiver is an AVAssetWriterInput configured for multi-pass encoding, a convenient time to invoke this method is after the asset writer input indicates that no more passes will be performed.
Once this method has been called, further invocations of -resetForReadingTimeRanges: are disallowed.
*/
- (void)markConfigurationAsFinal NS_AVAILABLE(10_10, 8_0);
@end
@class AVAssetReaderTrackOutputInternal;
/*!
@class AVAssetReaderTrackOutput
@abstract
AVAssetReaderTrackOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading media data from a single AVAssetTrack of an AVAssetReader's AVAsset.
@discussion
Clients can read the media data of an asset track by adding an instance of AVAssetReaderTrackOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method. The track's media samples can either be read in the format in which they are stored in the asset, or they can be converted to a different format.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetReaderTrackOutput : AVAssetReaderOutput
{
@private
AVAssetReaderTrackOutputInternal *_trackOutputInternal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetReaderTrackOutputWithTrack:outputSettings:
@abstract
Returns an instance of AVAssetReaderTrackOutput for reading from the specified track and supplying media data according to the specified output settings.
@param track
The AVAssetTrack from which the resulting AVAssetReaderTrackOutput should read sample buffers.
@param outputSettings
An NSDictionary of output settings to be used for sample output. See AVAudioSettings.h for available output settings for audio tracks or AVVideoSettings.h for available output settings for video tracks and also for more information about how to construct an output settings dictionary.
@result
An instance of AVAssetReaderTrackOutput.
@discussion
The track must be one of the tracks contained by the target AVAssetReader's asset.
A value of nil for outputSettings configures the output to vend samples in their original format as stored by the specified track. Initialization will fail if the output settings cannot be used with the specified track.
AVAssetReaderTrackOutput can only produce uncompressed output. For audio output settings, this means that AVFormatIDKey must be kAudioFormatLinearPCM. For video output settings, this means that the dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. AVAssetReaderTrackOutput does not support the AVAudioSettings.h key AVSampleRateConverterAudioQualityKey or the following AVVideoSettings.h keys:
AVVideoCleanApertureKey
AVVideoPixelAspectRatioKey
AVVideoScalingModeKey
When constructing video output settings the choice of pixel format will affect the performance and quality of the decompression. For optimal performance when decompressing video the requested pixel format should be one that the decoder supports natively to avoid unnecessary conversions. Below are some recommendations:
For H.264 use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange if the video is known to be full range. For JPEG on iOS, use kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
For other codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when decoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended on iOS and kCVPixelFormatType_32ARGB is recommended on OSX.
ProRes encoded media can contain up to 12bits/ch. If your source is ProRes encoded and you wish to preserve more than 8bits/ch during decompression then use one of the following pixel formats: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, kCVPixelFormatType_422YpCbCr10, or kCVPixelFormatType_64ARGB. AVAssetReader does not support scaling with any of these high bit depth pixel formats. If you use them then do not specify kCVPixelBufferWidthKey or kCVPixelBufferHeightKey in your outputSettings dictionary. If you plan to append these sample buffers to an AVAssetWriterInput then note that only the ProRes encoders support these pixel formats.
ProRes 4444 encoded media can contain a mathematically lossless alpha channel. To preserve the alpha channel during decompression use a pixel format with an alpha component such as kCVPixelFormatType_4444AYpCbCr16 or kCVPixelFormatType_64ARGB. To test whether your source contains an alpha channel check that the track's format description has kCMFormatDescriptionExtension_Depth and that its value is 32.
*/
+ (instancetype)assetReaderTrackOutputWithTrack:(AVAssetTrack *)track outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings;
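/* Editor's sketch, not part of the original header: uncompressed video output settings
   using a natively supported pixel format, per the recommendations above. `videoTrack`
   is assumed; kCVPixelBufferPixelFormatTypeKey is declared in CoreVideo.

       NSDictionary<NSString *, id> *settings = @{
           (NSString *)kCVPixelBufferPixelFormatTypeKey :
               @(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)
       };
       AVAssetReaderTrackOutput *output =
           [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTrack
                                                      outputSettings:settings];
*/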
/*!
@method initWithTrack:outputSettings:
@abstract
Returns an instance of AVAssetReaderTrackOutput for reading from the specified track and supplying media data according to the specified output settings.
@param track
The AVAssetTrack from which the resulting AVAssetReaderTrackOutput should read sample buffers.
@param outputSettings
An NSDictionary of output settings to be used for sample output. See AVAudioSettings.h for available output settings for audio tracks or AVVideoSettings.h for available output settings for video tracks and also for more information about how to construct an output settings dictionary.
@result
An instance of AVAssetReaderTrackOutput.
@discussion
The track must be one of the tracks contained by the target AVAssetReader's asset.
A value of nil for outputSettings configures the output to vend samples in their original format as stored by the specified track. Initialization will fail if the output settings cannot be used with the specified track.
AVAssetReaderTrackOutput can only produce uncompressed output. For audio output settings, this means that AVFormatIDKey must be kAudioFormatLinearPCM. For video output settings, this means that the dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. AVAssetReaderTrackOutput does not support the AVAudioSettings.h key AVSampleRateConverterAudioQualityKey or the following AVVideoSettings.h keys:
AVVideoCleanApertureKey
AVVideoPixelAspectRatioKey
AVVideoScalingModeKey
When constructing video output settings the choice of pixel format will affect the performance and quality of the decompression. For optimal performance when decompressing video the requested pixel format should be one that the decoder supports natively to avoid unnecessary conversions. Below are some recommendations:
For H.264 use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange if the video is known to be full range. For JPEG on iOS, use kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
For other codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when decoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended on iOS and kCVPixelFormatType_32ARGB is recommended on OSX.
ProRes encoded media can contain up to 12bits/ch. If your source is ProRes encoded and you wish to preserve more than 8bits/ch during decompression then use one of the following pixel formats: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, kCVPixelFormatType_422YpCbCr10, or kCVPixelFormatType_64ARGB. AVAssetReader does not support scaling with any of these high bit depth pixel formats. If you use them then do not specify kCVPixelBufferWidthKey or kCVPixelBufferHeightKey in your outputSettings dictionary. If you plan to append these sample buffers to an AVAssetWriterInput then note that only the ProRes encoders support these pixel formats.
ProRes 4444 encoded media can contain a mathematically lossless alpha channel. To preserve the alpha channel during decompression use a pixel format with an alpha component such as kCVPixelFormatType_4444AYpCbCr16 or kCVPixelFormatType_64ARGB. To test whether your source contains an alpha channel check that the track's format description has kCMFormatDescriptionExtension_Depth and that its value is 32.
*/
- (instancetype)initWithTrack:(AVAssetTrack *)track outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings NS_DESIGNATED_INITIALIZER;
/*!
@property track
@abstract
The track from which the receiver reads sample buffers.
@discussion
The value of this property is an AVAssetTrack owned by the target AVAssetReader's asset.
*/
@property (nonatomic, readonly) AVAssetTrack *track;
/*!
@property outputSettings
@abstract
The output settings used by the receiver.
@discussion
The value of this property is an NSDictionary that contains values for keys as specified by either AVAudioSettings.h for audio tracks or AVVideoSettings.h for video tracks. A value of nil indicates that the receiver will vend samples in their original format as stored in the target track.
*/
@property (nonatomic, readonly, nullable) NSDictionary<NSString *, id> *outputSettings;
/*!
@property audioTimePitchAlgorithm
@abstract
Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
@discussion
Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h. An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
The default value is AVAudioTimePitchAlgorithmSpectral.
*/
@property (nonatomic, copy) AVAudioTimePitchAlgorithm audioTimePitchAlgorithm NS_AVAILABLE(10_9, 7_0);
@end
@class AVAssetReaderAudioMixOutputInternal;
/*!
@class AVAssetReaderAudioMixOutput
@abstract
AVAssetReaderAudioMixOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading audio samples that result from mixing the audio from one or more AVAssetTracks of an AVAssetReader's AVAsset.
@discussion
Clients can read the audio data mixed from one or more asset tracks by adding an instance of AVAssetReaderAudioMixOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetReaderAudioMixOutput : AVAssetReaderOutput
{
@private
AVAssetReaderAudioMixOutputInternal *_audioMixOutputInternal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetReaderAudioMixOutputWithAudioTracks:audioSettings:
@abstract
Returns an instance of AVAssetReaderAudioMixOutput for reading mixed audio from the specified audio tracks, with optional audio settings.
@param tracks
An NSArray of AVAssetTrack objects from which the created object should read sample buffers to be mixed.
@param audioSettings
An NSDictionary of audio settings to be used for audio output.
@result
An instance of AVAssetReaderAudioMixOutput.
@discussion
Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeAudio.
The audio settings dictionary must contain values for keys in AVAudioSettings.h (linear PCM only). A value of nil configures the output to return samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the specified audio tracks. Initialization will fail if the audio settings cannot be used with the specified tracks. AVSampleRateConverterAudioQualityKey is not supported.
*/
+ (instancetype)assetReaderAudioMixOutputWithAudioTracks:(NSArray<AVAssetTrack *> *)audioTracks audioSettings:(nullable NSDictionary<NSString *, id> *)audioSettings;
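/* Editor's sketch, not part of the original header: mixing all of an asset's audio
   tracks into a convenient uncompressed format by passing nil settings. `asset` is
   assumed; -tracksWithMediaType: is declared on AVAsset.

       NSArray<AVAssetTrack *> *audioTracks = [asset tracksWithMediaType:AVMediaTypeAudio];
       AVAssetReaderAudioMixOutput *mixOutput =
           [AVAssetReaderAudioMixOutput assetReaderAudioMixOutputWithAudioTracks:audioTracks
                                                                   audioSettings:nil];
*/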
/*!
@method initWithAudioTracks:audioSettings:
@abstract
Creates an instance of AVAssetReaderAudioMixOutput for reading mixed audio from the specified audio tracks, with optional audio settings.
@param tracks
An NSArray of AVAssetTrack objects from which the created object should read sample buffers to be mixed.
@param audioSettings
An NSDictionary of audio settings to be used for audio output.
@result
An instance of AVAssetReaderAudioMixOutput.
@discussion
Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeAudio.
The audio settings dictionary must contain values for keys in AVAudioSettings.h (linear PCM only). A value of nil configures the output to return samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the specified audio tracks. Initialization will fail if the audio settings cannot be used with the specified tracks. AVSampleRateConverterAudioQualityKey is not supported.
*/
- (instancetype)initWithAudioTracks:(NSArray<AVAssetTrack *> *)audioTracks audioSettings:(nullable NSDictionary<NSString *, id> *)audioSettings NS_DESIGNATED_INITIALIZER;
/*!
@property audioTracks
@abstract
The tracks from which the receiver reads mixed audio.
@discussion
The value of this property is an NSArray of AVAssetTracks owned by the target AVAssetReader's asset.
*/
@property (nonatomic, readonly) NSArray<AVAssetTrack *> *audioTracks;
/*!
@property audioSettings
@abstract
The audio settings used by the receiver.
@discussion
The value of this property is an NSDictionary that contains values for keys from AVAudioSettings.h (linear PCM only). A value of nil indicates that the receiver will return audio samples in a convenient uncompressed format, with sample rate and other properties determined according to the properties of the receiver's audio tracks.
*/
@property (nonatomic, readonly, nullable) NSDictionary<NSString *, id> *audioSettings;
/*!
@property audioMix
@abstract
The audio mix used by the receiver.
@discussion
The value of this property is an AVAudioMix that can be used to specify how the volume of audio samples read from each source track will change over the timeline of the source asset.
This property cannot be set after reading has started.
*/
@property (nonatomic, copy, nullable) AVAudioMix *audioMix;
/*!
@property audioTimePitchAlgorithm
@abstract
Indicates the processing algorithm used to manage audio pitch for scaled audio edits.
@discussion
Constants for various time pitch algorithms, e.g. AVAudioTimePitchAlgorithmSpectral, are defined in AVAudioProcessingSettings.h. An NSInvalidArgumentException will be raised if this property is set to a value other than the constants defined in that file.
The default value is AVAudioTimePitchAlgorithmSpectral.
*/
@property (nonatomic, copy) AVAudioTimePitchAlgorithm audioTimePitchAlgorithm NS_AVAILABLE(10_9, 7_0);
@end
@class AVAssetReaderVideoCompositionOutputInternal;
/*!
@class AVAssetReaderVideoCompositionOutput
@abstract
AVAssetReaderVideoCompositionOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading video frames that have been composited together from the frames in one or more AVAssetTracks of an AVAssetReader's AVAsset.
@discussion
Clients can read the video frames composited from one or more asset tracks by adding an instance of AVAssetReaderVideoCompositionOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetReaderVideoCompositionOutput : AVAssetReaderOutput
{
@private
AVAssetReaderVideoCompositionOutputInternal *_videoCompositionOutputInternal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetReaderVideoCompositionOutputWithVideoTracks:videoSettings:
@abstract
Creates an instance of AVAssetReaderVideoCompositionOutput for reading composited video from the specified video tracks and supplying media data according to the specified video settings.
@param tracks
An NSArray of AVAssetTrack objects from which the resulting AVAssetReaderVideoCompositionOutput should read video frames for compositing.
@param videoSettings
An NSDictionary of video settings to be used for video output. See AVVideoSettings.h for more information about how to construct a video settings dictionary.
@result
An instance of AVAssetReaderVideoCompositionOutput.
@discussion
Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeVideo.
A value of nil for videoSettings configures the output to return samples in a convenient uncompressed format, with properties determined according to the properties of the specified video tracks. Initialization will fail if the video settings cannot be used with the specified tracks.
AVAssetReaderVideoCompositionOutput can only produce uncompressed output. This means that the video settings dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. In addition, the following keys are not supported:
AVVideoCleanApertureKey
AVVideoPixelAspectRatioKey
AVVideoScalingModeKey
*/
+ (instancetype)assetReaderVideoCompositionOutputWithVideoTracks:(NSArray<AVAssetTrack *> *)videoTracks videoSettings:(nullable NSDictionary<NSString *, id> *)videoSettings;
/*!
@method initWithVideoTracks:videoSettings:
@abstract
Creates an instance of AVAssetReaderVideoCompositionOutput for reading composited video from the specified video tracks and supplying media data according to the specified video settings.
@param tracks
An NSArray of AVAssetTrack objects from which the resulting AVAssetReaderVideoCompositionOutput should read video frames for compositing.
@param videoSettings
An NSDictionary of video settings to be used for video output. See AVVideoSettings.h for more information about how to construct a video settings dictionary.
@result An instance of AVAssetReaderVideoCompositionOutput.
@discussion
Each track must be one of the tracks owned by the target AVAssetReader's asset and must be of media type AVMediaTypeVideo.
A value of nil for videoSettings configures the output to return samples in a convenient uncompressed format, with properties determined according to the properties of the specified video tracks. Initialization will fail if the video settings cannot be used with the specified tracks.
AVAssetReaderVideoCompositionOutput can only produce uncompressed output. This means that the video settings dictionary must follow the rules for uncompressed video output, as laid out in AVVideoSettings.h. In addition, the following keys are not supported:
AVVideoCleanApertureKey
AVVideoPixelAspectRatioKey
AVVideoScalingModeKey
*/
- (instancetype)initWithVideoTracks:(NSArray<AVAssetTrack *> *)videoTracks videoSettings:(nullable NSDictionary<NSString *, id> *)videoSettings NS_DESIGNATED_INITIALIZER;
/*!
@property videoTracks
@abstract
The tracks from which the receiver reads composited video.
@discussion
The value of this property is an NSArray of AVAssetTracks owned by the target AVAssetReader's asset.
*/
@property (nonatomic, readonly) NSArray<AVAssetTrack *> *videoTracks;
/*!
@property videoSettings
@abstract
The video settings used by the receiver.
@discussion
The value of this property is an NSDictionary that contains values for keys as specified by AVVideoSettings.h. A value of nil indicates that the receiver will return video frames in a convenient uncompressed format, with properties determined according to the properties of the receiver's video tracks.
*/
@property (nonatomic, readonly, nullable) NSDictionary<NSString *, id> *videoSettings;
/*!
@property videoComposition
@abstract
The composition of video used by the receiver.
@discussion
The value of this property is an AVVideoComposition that can be used to specify the visual arrangement of video frames read from each source track over the timeline of the source asset.
This property cannot be set after reading has started.
*/
@property (nonatomic, copy, nullable) AVVideoComposition *videoComposition;
/*!
@property customVideoCompositor
@abstract
Indicates the custom video compositor instance used by the receiver.
@discussion
This property is nil if there is no video compositor, or if the internal video compositor is in use.
*/
@property (nonatomic, readonly, nullable) id <AVVideoCompositing> customVideoCompositor NS_AVAILABLE(10_9, 7_0);
@end
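// --- Editor's illustrative sketch (not part of the original header) ---
// Reading composited frames with AVAssetReaderVideoCompositionOutput. A video
// composition must be set before reading starts; here one is derived from the
// asset's own properties. `asset` and `reader` are assumed to exist already.
static void ReadCompositedFramesSketch(AVAsset *asset, AVAssetReader *reader)
{
    NSArray<AVAssetTrack *> *videoTracks = [asset tracksWithMediaType:AVMediaTypeVideo];
    AVAssetReaderVideoCompositionOutput *output =
        [AVAssetReaderVideoCompositionOutput assetReaderVideoCompositionOutputWithVideoTracks:videoTracks
                                                                                videoSettings:nil];
    output.videoComposition = [AVVideoComposition videoCompositionWithPropertiesOfAsset:asset];
    [reader addOutput:output];
    if ([reader startReading]) {
        CMSampleBufferRef frame;
        while ((frame = [output copyNextSampleBuffer]) != NULL) {
            // ... use the composited, uncompressed frame here ...
            CFRelease(frame); // -copyNextSampleBuffer follows the Create/Copy rule
        }
    }
}
// --- end sketch ---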
@class AVTimedMetadataGroup;
@class AVAssetReaderOutputMetadataAdaptorInternal;
/*!
@class AVAssetReaderOutputMetadataAdaptor
@abstract
Defines an interface for reading metadata, packaged as instances of AVTimedMetadataGroup, from a single AVAssetReaderTrackOutput object.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAssetReaderOutputMetadataAdaptor : NSObject
{
@private
AVAssetReaderOutputMetadataAdaptorInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput:
@abstract
Creates a new timed metadata group adaptor for retrieving timed metadata group objects from an asset reader output.
@param assetReaderOutput
An instance of AVAssetReaderTrackOutput that vends sample buffers containing metadata, e.g. an AVAssetReaderTrackOutput object initialized with a track of media type AVMediaTypeMetadata and nil outputSettings.
@result
An instance of AVAssetReaderOutputMetadataAdaptor.
@discussion
It is an error to create a timed metadata group adaptor with an asset reader output that does not vend metadata. It is also an error to create a timed metadata group adaptor with an asset reader output whose asset reader has already started reading, or an asset reader output that already has been used to initialize another timed metadata group adaptor.
Clients should not mix calls to -[AVAssetReaderTrackOutput copyNextSampleBuffer] and -[AVAssetReaderOutputMetadataAdaptor nextTimedMetadataGroup]. Once an AVAssetReaderTrackOutput instance has been used to initialize an AVAssetReaderOutputMetadataAdaptor, calling -copyNextSampleBuffer on that instance will result in an exception being thrown.
*/
+ (instancetype)assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput:(AVAssetReaderTrackOutput *)trackOutput;
/*!
@method initWithAssetReaderTrackOutput:
@abstract
Creates a new timed metadata group adaptor for retrieving timed metadata group objects from an asset reader output.
@param assetReaderOutput
An instance of AVAssetReaderTrackOutput that vends sample buffers containing metadata, e.g. an AVAssetReaderTrackOutput object initialized with a track of media type AVMediaTypeMetadata and nil outputSettings.
@result
An instance of AVAssetReaderOutputMetadataAdaptor.
@discussion
It is an error to create a timed metadata group adaptor with an asset reader output that does not vend metadata. It is also an error to create a timed metadata group adaptor with an asset reader output whose asset reader has already started reading, or an asset reader output that already has been used to initialize another timed metadata group adaptor.
Clients should not mix calls to -[AVAssetReaderTrackOutput copyNextSampleBuffer] and -[AVAssetReaderOutputMetadataAdaptor nextTimedMetadataGroup]. Once an AVAssetReaderTrackOutput instance has been used to initialize an AVAssetReaderOutputMetadataAdaptor, calling -copyNextSampleBuffer on that instance will result in an exception being thrown.
*/
- (instancetype)initWithAssetReaderTrackOutput:(AVAssetReaderTrackOutput *)trackOutput NS_DESIGNATED_INITIALIZER;
/*!
@property assetReaderTrackOutput
@abstract
The asset reader track output from which the receiver pulls timed metadata groups.
*/
@property (nonatomic, readonly) AVAssetReaderTrackOutput *assetReaderTrackOutput;
/*!
@method nextTimedMetadataGroup
@abstract
Returns the next timed metadata group for the asset reader output, synchronously.
@result
An instance of AVTimedMetadataGroup, representing the next logical segment of metadata coming from the source asset reader output.
@discussion
This method will return nil when all timed metadata groups have been read from the asset reader output, or if there is an error that prevents the timed metadata group adaptor from reading more timed metadata groups. When this method returns nil, clients should check the value of the associated AVAssetReader's status property to determine why no more samples could be read.
Unlike -[AVAssetReaderTrackOutput copyNextSampleBuffer], this method returns an autoreleased object.
Before calling this method, you must ensure that the output which underlies the receiver is attached to an AVAssetReader via a prior call to -addOutput: and that -startReading has been called on the asset reader.
*/
- (nullable AVTimedMetadataGroup *)nextTimedMetadataGroup;
@end
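// --- Editor's illustrative sketch (not part of the original header) ---
// Draining timed metadata groups through the adaptor rather than calling
// -copyNextSampleBuffer directly (mixing the two throws, per the notes above).
// `reader` and `metadataTrack` (an AVMediaTypeMetadata track) are assumed.
static void ReadTimedMetadataSketch(AVAssetReader *reader, AVAssetTrack *metadataTrack)
{
    AVAssetReaderTrackOutput *trackOutput =
        [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:metadataTrack outputSettings:nil];
    AVAssetReaderOutputMetadataAdaptor *adaptor =
        [AVAssetReaderOutputMetadataAdaptor assetReaderOutputMetadataAdaptorWithAssetReaderTrackOutput:trackOutput];
    [reader addOutput:trackOutput];
    [reader startReading];

    AVTimedMetadataGroup *group;
    while ((group = [adaptor nextTimedMetadataGroup]) != nil) {
        NSLog(@"%lu items starting at %f seconds", (unsigned long)group.items.count,
              CMTimeGetSeconds(group.timeRange.start));
    }
    // A nil group means either completion or failure; check the reader's status.
    if (reader.status == AVAssetReaderStatusFailed) {
        NSLog(@"reading failed: %@", reader.error);
    }
}
// --- end sketch ---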
@class AVAssetReaderSampleReferenceOutputInternal;
/*!
@class AVAssetReaderSampleReferenceOutput
@abstract
AVAssetReaderSampleReferenceOutput is a concrete subclass of AVAssetReaderOutput that defines an interface for reading sample references from a single AVAssetTrack of an AVAssetReader's AVAsset.
@discussion
Clients can extract information about the location (file URL and offset) of samples in a track by adding an instance of AVAssetReaderSampleReferenceOutput to an AVAssetReader using the -[AVAssetReader addOutput:] method. No actual sample data can be extracted using this class. The location of the sample data is described by the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachments on the extracted sample buffers. More information about sample buffers describing sample references can be found in the CMSampleBuffer documentation.
Sample buffers extracted using this class can also be appended to an AVAssetWriterInput to create movie tracks that are not self-contained and reference data in the original file instead. Currently, only instances of AVAssetWriter configured to write files of type AVFileTypeQuickTimeMovie can be used to write tracks that are not self-contained.
Since no sample data is ever returned by instances of AVAssetReaderSampleReferenceOutput, the value of the alwaysCopiesSampleData property is ignored.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAssetReaderSampleReferenceOutput : AVAssetReaderOutput
{
@private
AVAssetReaderSampleReferenceOutputInternal *_sampleReferenceOutputInternal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetReaderSampleReferenceOutputWithTrack:
@abstract
Returns an instance of AVAssetReaderSampleReferenceOutput for supplying sample references.
@param track
The AVAssetTrack for which the resulting AVAssetReaderSampleReferenceOutput should provide sample references.
@result
An instance of AVAssetReaderSampleReferenceOutput.
@discussion
The track must be one of the tracks contained by the target AVAssetReader's asset.
*/
+ (instancetype)assetReaderSampleReferenceOutputWithTrack:(AVAssetTrack *)track;
/*!
@method initWithTrack:
@abstract
Returns an instance of AVAssetReaderSampleReferenceOutput for supplying sample references.
@param track
The AVAssetTrack for which the resulting AVAssetReaderSampleReferenceOutput should provide sample references.
@result
An instance of AVAssetReaderSampleReferenceOutput.
@discussion
The track must be one of the tracks contained by the target AVAssetReader's asset.
*/
- (instancetype)initWithTrack:(AVAssetTrack *)track NS_DESIGNATED_INITIALIZER;
/*!
@property track
@abstract
The track from which the receiver extracts sample references.
@discussion
The value of this property is an AVAssetTrack owned by the target AVAssetReader's asset.
*/
@property (nonatomic, readonly) AVAssetTrack *track;
@end
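// --- Editor's illustrative sketch (not part of the original header) ---
// Extracting sample locations via the attachments named above. No media data is
// carried in these buffers, only references. `reader` and `track` are assumed.
static void ReadSampleReferencesSketch(AVAssetReader *reader, AVAssetTrack *track)
{
    AVAssetReaderSampleReferenceOutput *output =
        [AVAssetReaderSampleReferenceOutput assetReaderSampleReferenceOutputWithTrack:track];
    [reader addOutput:output];
    [reader startReading];

    CMSampleBufferRef ref;
    while ((ref = [output copyNextSampleBuffer]) != NULL) {
        NSURL *url = (__bridge NSURL *)CMGetAttachment(ref, kCMSampleBufferAttachmentKey_SampleReferenceURL, NULL);
        NSNumber *offset = (__bridge NSNumber *)CMGetAttachment(ref, kCMSampleBufferAttachmentKey_SampleReferenceByteOffset, NULL);
        NSLog(@"sample data lives at %@, byte offset %@", url, offset);
        CFRelease(ref);
    }
}
// --- end sketch ---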
NS_ASSUME_NONNULL_END

View file

@@ -1,430 +0,0 @@
/*
File: AVAssetResourceLoader.h
Framework: AVFoundation
Copyright 2010-2016 Apple Inc. All rights reserved.
*/
/*!
@class AVAssetResourceLoader
@abstract An AVAssetResourceLoader mediates requests to load resources required by an AVURLAsset by asking a delegate object that you provide for assistance. When a resource is required that cannot be loaded by the AVURLAsset itself, the resource loader makes a request of its delegate to load it and proceeds according to the delegate's response.
@discussion
You should not create resource loader objects. Instead, you should retrieve a resource loader from the resourceLoader property of an AVURLAsset and use it to assign your delegate object for resource loading.
The delegate associated with this object must adopt the AVAssetResourceLoaderDelegate protocol.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@protocol AVAssetResourceLoaderDelegate;
@class AVAssetResourceLoadingRequest;
@class AVAssetResourceRenewalRequest;
@class AVAssetResourceLoaderInternal;
NS_CLASS_AVAILABLE(10_9, 6_0)
@interface AVAssetResourceLoader : NSObject {
@private
AVAssetResourceLoaderInternal *_resourceLoader;
}
AV_INIT_UNAVAILABLE
/*!
@method setDelegate:queue:
@abstract Sets the receiver's delegate that will mediate resource loading and the dispatch queue on which delegate methods will be invoked.
@param delegate
An object conforming to the AVAssetResourceLoaderDelegate protocol.
@param delegateQueue
A dispatch queue on which all delegate methods will be invoked.
@discussion
If you employ an AVAssetResourceLoader delegate that loads media data for playback, you should set the value of your AVPlayer's automaticallyWaitsToMinimizeStalling property to NO. Allowing the value of automaticallyWaitsToMinimizeStalling to remain YES, its default value, when an AVAssetResourceLoader delegate is used for the loading of media data can result in poor start-up times for playback and poor recovery from stalls, because the behaviors provided by AVPlayer when automaticallyWaitsToMinimizeStalling has a value of YES depend on predictions of the future availability of media data that do not function as expected when data is loaded via a client-controlled means using the AVAssetResourceLoader delegate interface.
You can allow the value of automaticallyWaitsToMinimizeStalling to remain YES if you use an AVAssetResourceLoader delegate to manage content keys for FairPlay Streaming, to provide dynamically-generated master playlists for HTTP Live Streaming, or to respond to authentication challenges, but not to load media data for playback.
*/
- (void)setDelegate:(nullable id <AVAssetResourceLoaderDelegate>)delegate queue:(nullable dispatch_queue_t)delegateQueue;
/*!
@property delegate
@abstract The receiver's delegate.
@discussion
The value of this property is an object conforming to the AVAssetResourceLoaderDelegate protocol. The delegate is set using the setDelegate:queue: method. The delegate is held using a zeroing-weak reference, so this property will have a value of nil after a delegate that was previously set has been deallocated.
*/
@property (nonatomic, readonly, weak, nullable) id <AVAssetResourceLoaderDelegate> delegate;
/*!
@property delegateQueue
@abstract The dispatch queue on which all delegate methods will be invoked.
@discussion
The value of this property is a dispatch_queue_t. The queue is set using the setDelegate:queue: method.
*/
@property (nonatomic, readonly, nullable) dispatch_queue_t delegateQueue;
@end
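// --- Editor's illustrative sketch (not part of the original header) ---
// Attaching a resource loader delegate and, because this delegate loads media
// data, disabling automaticallyWaitsToMinimizeStalling as recommended above.
// The custom scheme, queue label, and `delegate` object are hypothetical.
static AVPlayer *MakePlayerWithCustomLoaderSketch(id <AVAssetResourceLoaderDelegate> delegate)
{
    NSURL *url = [NSURL URLWithString:@"example-scheme://media/stream.m3u8"]; // hypothetical
    AVURLAsset *asset = [AVURLAsset URLAssetWithURL:url options:nil];
    dispatch_queue_t queue = dispatch_queue_create("com.example.resource-loader", DISPATCH_QUEUE_SERIAL);
    [asset.resourceLoader setDelegate:delegate queue:queue];

    AVPlayer *player = [AVPlayer playerWithPlayerItem:[AVPlayerItem playerItemWithAsset:asset]];
    player.automaticallyWaitsToMinimizeStalling = NO; // see the discussion of setDelegate:queue: above
    return player;
}
// --- end sketch ---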
/*!
@protocol AVAssetResourceLoaderDelegate
@abstract The AVAssetResourceLoaderDelegate protocol defines methods that allow your code to handle resource loading requests coming from an AVURLAsset.
*/
@class NSURLAuthenticationChallenge;
@protocol AVAssetResourceLoaderDelegate <NSObject>
@optional
/*!
@method resourceLoader:shouldWaitForLoadingOfRequestedResource:
@abstract Invoked when assistance is required of the application to load a resource.
@param resourceLoader
The instance of AVAssetResourceLoader for which the loading request is being made.
@param loadingRequest
An instance of AVAssetResourceLoadingRequest that provides information about the requested resource.
@result YES if the delegate can load the resource indicated by the AVAssetResourceLoadingRequest; otherwise NO.
@discussion
Delegates receive this message when assistance is required of the application to load a resource. For example, this method is invoked to load decryption keys that have been specified using custom URL schemes.
If the result is YES, the resource loader expects invocation, either subsequently or immediately, of either -[AVAssetResourceLoadingRequest finishLoading] or -[AVAssetResourceLoadingRequest finishLoadingWithError:]. If you intend to finish loading the resource after your handling of this message returns, you must retain the instance of AVAssetResourceLoadingRequest until after loading is finished.
If the result is NO, the resource loader treats the loading of the resource as having failed.
Note that if the delegate's implementation of -resourceLoader:shouldWaitForLoadingOfRequestedResource: returns YES without finishing the loading request immediately, it may be invoked again with another loading request before the prior request is finished; therefore in such cases the delegate should be prepared to manage multiple loading requests.
If an AVURLAsset is added to an AVContentKeySession object and a delegate is set on its AVAssetResourceLoader, that delegate's resourceLoader:shouldWaitForLoadingOfRequestedResource: method must specify which custom URL requests should be handled as content keys. This is done by returning YES and passing either AVStreamingKeyDeliveryPersistentContentKeyType or AVStreamingKeyDeliveryContentKeyType into -[AVAssetResourceLoadingContentInformationRequest setContentType:] and then calling -[AVAssetResourceLoadingRequest finishLoading].
*/
- (BOOL)resourceLoader:(AVAssetResourceLoader *)resourceLoader shouldWaitForLoadingOfRequestedResource:(AVAssetResourceLoadingRequest *)loadingRequest NS_AVAILABLE(10_9, 6_0);
/*!
@method resourceLoader:shouldWaitForRenewalOfRequestedResource:
@abstract Invoked when assistance is required of the application to renew a resource.
@param resourceLoader
The instance of AVAssetResourceLoader for which the loading request is being made.
@param renewalRequest
An instance of AVAssetResourceRenewalRequest that provides information about the requested resource.
@result YES if the delegate can renew the resource indicated by the AVAssetResourceLoadingRequest; otherwise NO.
@discussion
Delegates receive this message when assistance is required of the application to renew a resource previously loaded by resourceLoader:shouldWaitForLoadingOfRequestedResource:. For example, this method is invoked to renew decryption keys that require renewal, as indicated in a response to a prior invocation of resourceLoader:shouldWaitForLoadingOfRequestedResource:.
If the result is YES, the resource loader expects invocation, either subsequently or immediately, of either -[AVAssetResourceRenewalRequest finishLoading] or -[AVAssetResourceRenewalRequest finishLoadingWithError:]. If you intend to finish loading the resource after your handling of this message returns, you must retain the instance of AVAssetResourceRenewalRequest until after loading is finished.
If the result is NO, the resource loader treats the loading of the resource as having failed.
Note that if the delegate's implementation of -resourceLoader:shouldWaitForRenewalOfRequestedResource: returns YES without finishing the loading request immediately, it may be invoked again with another loading request before the prior request is finished; therefore in such cases the delegate should be prepared to manage multiple loading requests.
If an AVURLAsset is added to an AVContentKeySession object and a delegate is set on its AVAssetResourceLoader, that delegate's -resourceLoader:shouldWaitForRenewalOfRequestedResource: method must specify which custom URL requests should be handled as content keys. This is done by returning YES and passing either AVStreamingKeyDeliveryPersistentContentKeyType or AVStreamingKeyDeliveryContentKeyType into -[AVAssetResourceLoadingContentInformationRequest setContentType:] and then calling -[AVAssetResourceLoadingRequest finishLoading].
*/
- (BOOL)resourceLoader:(AVAssetResourceLoader *)resourceLoader shouldWaitForRenewalOfRequestedResource:(AVAssetResourceRenewalRequest *)renewalRequest NS_AVAILABLE(10_10, 8_0);
/*!
@method resourceLoader:didCancelLoadingRequest:
@abstract Informs the delegate that a prior loading request has been cancelled.
@param loadingRequest
The loading request that has been cancelled.
@discussion Previously issued loading requests can be cancelled when data from the resource is no longer required or when a loading request is superseded by new requests for data from the same resource. For example, if, in order to complete a seek operation, it becomes necessary to load a range of bytes that's different from a range previously requested, the prior request may be cancelled while the delegate is still handling it.
*/
- (void)resourceLoader:(AVAssetResourceLoader *)resourceLoader didCancelLoadingRequest:(AVAssetResourceLoadingRequest *)loadingRequest NS_AVAILABLE(10_9, 7_0);
/*!
@method resourceLoader:shouldWaitForResponseToAuthenticationChallenge:
@abstract Invoked when assistance is required of the application to respond to an authentication challenge.
@param resourceLoader
The instance of AVAssetResourceLoader asking for help with an authentication challenge.
@param authenticationChallenge
An instance of NSURLAuthenticationChallenge.
@discussion
Delegates receive this message when assistance is required of the application to respond to an authentication challenge.
If the result is YES, the resource loader expects you to send an appropriate response, either subsequently or immediately, to the NSURLAuthenticationChallenge's sender, i.e. [authenticationChallenge sender], via use of one of the messages defined in the NSURLAuthenticationChallengeSender protocol (see NSURLAuthenticationChallenge.h). If you intend to respond to the authentication challenge after your handling of -resourceLoader:shouldWaitForResponseToAuthenticationChallenge: returns, you must retain the instance of NSURLAuthenticationChallenge until after your response has been made.
*/
- (BOOL)resourceLoader:(AVAssetResourceLoader *)resourceLoader shouldWaitForResponseToAuthenticationChallenge:(NSURLAuthenticationChallenge *)authenticationChallenge NS_AVAILABLE(10_10, 8_0);
/*!
@method resourceLoader:didCancelAuthenticationChallenge:
@abstract Informs the delegate that a prior authentication challenge has been cancelled.
@param authenticationChallenge
The authentication challenge that has been cancelled.
*/
- (void)resourceLoader:(AVAssetResourceLoader *)resourceLoader didCancelAuthenticationChallenge:(NSURLAuthenticationChallenge *)authenticationChallenge NS_AVAILABLE(10_10, 8_0);
@end
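// --- Editor's illustrative sketch (not part of the original header) ---
// A minimal delegate method, placed inside your delegate class's
// @implementation, that satisfies a loading request synchronously from an
// in-memory NSData. `self.resourceData` and the UTI are hypothetical; a real
// delegate would also handle requestsAllDataToEndOfResource and failures.
- (BOOL)resourceLoader:(AVAssetResourceLoader *)resourceLoader
        shouldWaitForLoadingOfRequestedResource:(AVAssetResourceLoadingRequest *)loadingRequest
{
    NSData *resource = self.resourceData; // hypothetical backing store
    AVAssetResourceLoadingContentInformationRequest *info = loadingRequest.contentInformationRequest;
    if (info) {
        info.contentType = @"public.mpeg-4"; // a UTI, per the contentType documentation below
        info.contentLength = (long long)resource.length;
        info.byteRangeAccessSupported = YES;
    }
    AVAssetResourceLoadingDataRequest *dataRequest = loadingRequest.dataRequest;
    if (dataRequest) {
        NSUInteger offset = MIN((NSUInteger)dataRequest.requestedOffset, resource.length);
        NSUInteger length = MIN((NSUInteger)dataRequest.requestedLength, resource.length - offset);
        [dataRequest respondWithData:[resource subdataWithRange:NSMakeRange(offset, length)]];
    }
    [loadingRequest finishLoading];
    return YES;
}
// --- end sketch ---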
/*!
@class AVAssetResourceLoadingRequest
@abstract AVAssetResourceLoadingRequest encapsulates information about a resource request issued by a resource loader.
@discussion
When an AVURLAsset needs help loading a resource, it asks its AVAssetResourceLoader object to assist. The resource loader encapsulates the request information by creating an instance of this object, which it then hands to its delegate for processing. The delegate uses the information in this object to perform the request and report on the success or failure of the operation.
*/
@class AVAssetResourceLoadingRequestInternal;
@class AVAssetResourceLoadingContentInformationRequest;
@class AVAssetResourceLoadingDataRequest;
NS_CLASS_AVAILABLE(10_9, 6_0)
@interface AVAssetResourceLoadingRequest : NSObject {
@private
AVAssetResourceLoadingRequestInternal *_loadingRequest;
}
AV_INIT_UNAVAILABLE
/*!
@property request
@abstract An NSURLRequest for the requested resource.
*/
@property (nonatomic, readonly) NSURLRequest *request;
/*!
@property finished
@abstract Indicates whether loading of the resource has been finished.
@discussion The value of this property becomes YES only in response to an invocation of either -finishLoading or -finishLoadingWithError:.
*/
@property (nonatomic, readonly, getter=isFinished) BOOL finished;
/*!
@property cancelled
@abstract Indicates whether the request has been cancelled.
@discussion The value of this property becomes YES when the resource loader cancels the loading of a request, just prior to sending the message -resourceLoader:didCancelLoadingRequest: to its delegate.
*/
@property (nonatomic, readonly, getter=isCancelled) BOOL cancelled NS_AVAILABLE(10_9, 7_0);
/*!
@property contentInformationRequest
@abstract An instance of AVAssetResourceLoadingContentInformationRequest that you should populate with information about the resource. The value of this property will be nil if no such information is being requested.
*/
@property (nonatomic, readonly, nullable) AVAssetResourceLoadingContentInformationRequest *contentInformationRequest NS_AVAILABLE(10_9, 7_0);
/*!
@property dataRequest
@abstract An instance of AVAssetResourceLoadingDataRequest that indicates the range of resource data that's being requested. The value of this property will be nil if no data is being requested.
*/
@property (nonatomic, readonly, nullable) AVAssetResourceLoadingDataRequest *dataRequest NS_AVAILABLE(10_9, 7_0);
/*!
@property response
@abstract Set the value of this property to an instance of NSURLResponse indicating a response to the loading request. If no response is needed, leave the value of this property set to nil.
*/
@property (nonatomic, copy, nullable) NSURLResponse *response NS_AVAILABLE(10_9, 7_0);
/*!
@property redirect
@abstract Set the value of this property to an instance of NSURLRequest indicating a redirection of the loading request to another URL. If no redirection is needed, leave the value of this property set to nil.
@discussion AVAssetResourceLoader supports redirects to HTTP URLs only. Redirects to other URLs will result in a loading failure.
*/
@property (nonatomic, copy, nullable) NSURLRequest *redirect NS_AVAILABLE(10_9, 7_0);
/*!
@method finishLoading
@abstract Causes the receiver to treat the processing of the request as complete.
@discussion If a dataRequest is present and the resource does not contain the full extent of the data that has been requested according to the values of the requestedOffset and requestedLength properties of the dataRequest, or if requestsAllDataToEndOfResource has a value of YES, you may invoke -finishLoading after you have provided as much of the requested data as the resource contains.
*/
- (void)finishLoading NS_AVAILABLE(10_9, 7_0);
/*!
@method finishLoadingWithError:
@abstract Causes the receiver to treat the request as having failed.
@param error
An instance of NSError indicating the reason for failure.
*/
- (void)finishLoadingWithError:(nullable NSError *)error;
@end
/*!
@class AVAssetResourceRenewalRequest
@abstract AVAssetResourceRenewalRequest encapsulates information about a resource request issued by a resource loader for the purpose of renewing a request previously issued.
@discussion
When an AVURLAsset needs to renew a resource (because contentInformationRequest.renewalDate has been set on a previous loading request), it asks its AVAssetResourceLoader object to assist. The resource loader encapsulates the request information by creating an instance of this object, which it then hands to its delegate for processing. The delegate uses the information in this object to perform the request and report on the success or failure of the operation.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAssetResourceRenewalRequest : AVAssetResourceLoadingRequest
@end
/*!
@class AVAssetResourceLoadingContentInformationRequest
@abstract An AVAssetResourceLoadingContentInformationRequest represents a query for essential information about a resource referenced by an asset resource loading request.
@discussion
When a resource loading delegate accepts responsibility for loading a resource by returning YES from its implementation of resourceLoader:shouldWaitForLoadingOfRequestedResource:, it must check whether the contentInformationRequest property of the AVAssetResourceLoadingRequest is not nil. Whenever the value is not nil, the request includes a query for the information that AVAssetResourceLoadingContentInformationRequest encapsulates. In response to such queries, the resource loading delegate should set the values of the content information request's properties appropriately before invoking the AVAssetResourceLoadingRequest method finishLoading.
When finishLoading is invoked, the values of the properties of its contentInformationRequest property will, in part, determine how the requested resource is processed. For example, if the requested resource's URL is the URL of an AVURLAsset and contentType is set by the resource loading delegate to a value that the underlying media system doesn't recognize as a supported media file type, operations on the AVURLAsset, such as playback, are likely to fail.
*/
@class AVAssetResourceLoadingContentInformationRequestInternal;
NS_CLASS_AVAILABLE(10_9, 7_0)
@interface AVAssetResourceLoadingContentInformationRequest : NSObject {
@private
AVAssetResourceLoadingContentInformationRequestInternal *_contentInformationRequest;
}
AV_INIT_UNAVAILABLE
/*!
@property contentType
@abstract A UTI that indicates the type of data contained by the requested resource.
@discussion Before you finish loading an AVAssetResourceLoadingRequest, if its contentInformationRequest is not nil, you should set the value of this property to a UTI indicating the type of data contained by the requested resource.
*/
@property (nonatomic, copy, nullable) NSString *contentType;
/*!
@property contentLength
@abstract Indicates the length of the requested resource, in bytes.
@discussion Before you finish loading an AVAssetResourceLoadingRequest, if its contentInformationRequest is not nil, you should set the value of this property to the number of bytes contained by the requested resource.
*/
@property (nonatomic) long long contentLength;
/*!
@property byteRangeAccessSupported
@abstract Indicates whether random access to arbitrary ranges of bytes of the resource is supported. Such support also allows portions of the resource to be requested more than once.
@discussion Before you finish loading an AVAssetResourceLoadingRequest, if its contentInformationRequest is not nil, you should set the value of this property to YES if you support random access to arbitrary ranges of bytes of the resource. If you do not set this property to YES for resources that must be loaded incrementally, loading of the resource may fail. Such resources include anything that contains media data.
*/
@property (nonatomic, getter=isByteRangeAccessSupported) BOOL byteRangeAccessSupported;
/*!
@property renewalDate
@abstract For resources that expire, the date at which a new AVAssetResourceLoadingRequest will be issued for a renewal of this resource, if the media system still requires it.
@discussion Before you finish loading an AVAssetResourceLoadingRequest, if the resource is prone to expiry you should set the value of this property to the date at which a renewal should be triggered. This value should be set sufficiently early to allow an AVAssetResourceRenewalRequest, delivered to your delegate via -resourceLoader:shouldWaitForRenewalOfRequestedResource:, to finish before the actual expiry time. Otherwise media playback may fail.
*/
@property (nonatomic, copy, nullable) NSDate *renewalDate NS_AVAILABLE(10_10, 8_0);
@end
/*!
@class AVAssetResourceLoadingDataRequest
@abstract An AVAssetResourceLoadingDataRequest is used to request data from a resource referenced by an AVAssetResourceLoadingRequest.
@discussion
The AVAssetResourceLoaderDelegate uses the AVAssetResourceLoadingDataRequest class to do the actual data reading, and its methods will be invoked, as necessary, to acquire data for the AVAssetResourceLoadingRequest instance.
When a resource loading delegate accepts responsibility for loading a resource by returning YES from its implementation of resourceLoader:shouldWaitForLoadingOfRequestedResource:, it must check whether the dataRequest property of the AVAssetResourceLoadingRequest instance is not nil. If it is not nil, the resource loading delegate is informed of the range of bytes within the resource that are required by the underlying media system. In response, the data is provided by one or more invocations of respondWithData: as needed for provision of the requested data. The data can be provided in increments determined by the resource loading delegate according to convenience or efficiency.
When the AVAssetResourceLoadingRequest method finishLoading is invoked, the data request is considered fully satisfied. If the entire range of bytes requested has not yet been provided, the underlying media system assumes that the resource's length is limited to the provided content.
*/
@class AVAssetResourceLoadingDataRequestInternal;
NS_CLASS_AVAILABLE(10_9, 7_0)
@interface AVAssetResourceLoadingDataRequest : NSObject {
@private
AVAssetResourceLoadingDataRequestInternal *_dataRequest;
}
AV_INIT_UNAVAILABLE
/*!
@property requestedOffset
@abstract The position within the resource of the first byte requested.
*/
@property (nonatomic, readonly) long long requestedOffset;
/*!
@property requestedLength
@abstract The length of the data requested.
@discussion Note that requestsAllDataToEndOfResource will be set to YES when the entire remaining length of the resource is being requested from requestedOffset to the end of the resource. This can occur even when the content length has not yet been reported by you via a prior finished loading request.
When requestsAllDataToEndOfResource has a value of YES, you should disregard the value of requestedLength and incrementally provide as much data starting from the requestedOffset as the resource contains, until you have provided all of the available data successfully and invoked -finishLoading, until you have encountered a failure and invoked -finishLoadingWithError:, or until you have received -resourceLoader:didCancelLoadingRequest: for the AVAssetResourceLoadingRequest from which the AVAssetResourceLoadingDataRequest was obtained.
When requestsAllDataToEndOfResource is YES and the content length has not yet been provided by you via a prior finished loading request, the value of requestedLength is set to NSIntegerMax. Starting in OS X 10.11 and iOS 9.0, in 32-bit applications requestedLength is also set to NSIntegerMax when all of the remaining resource data is being requested and the known length of the remaining data exceeds NSIntegerMax.
*/
@property (nonatomic, readonly) NSInteger requestedLength;
/*!
@property requestsAllDataToEndOfResource
@abstract Specifies that the entire remaining length of the resource from requestedOffset to the end of the resource is being requested.
@discussion When requestsAllDataToEndOfResource has a value of YES, you should disregard the value of requestedLength and incrementally provide as much data starting from the requestedOffset as the resource contains, until you have provided all of the available data successfully and invoked -finishLoading, until you have encountered a failure and invoked -finishLoadingWithError:, or until you have received -resourceLoader:didCancelLoadingRequest: for the AVAssetResourceLoadingRequest from which the AVAssetResourceLoadingDataRequest was obtained.
*/
@property (nonatomic, readonly) BOOL requestsAllDataToEndOfResource NS_AVAILABLE(10_11, 9_0);
/*!
@property currentOffset
@abstract The position within the resource of the next byte following the bytes that have already been provided via prior invocations of -respondWithData:.
*/
@property (nonatomic, readonly) long long currentOffset;
/*!
@method respondWithData:
@abstract Provides data to the receiver.
@param data
An instance of NSData containing some or all of the requested bytes.
@discussion May be invoked multiple times on the same instance of AVAssetResourceLoadingDataRequest to provide the full range of requested data incrementally. Upon each invocation, the value of currentOffset will be updated to accord with the amount of data provided.
*/
- (void)respondWithData:(NSData *)data;
@end
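// --- Editor's illustrative sketch (not part of the original header) ---
// Providing data incrementally, honoring requestsAllDataToEndOfResource as
// described above. `readChunk` is a hypothetical block that returns the next
// chunk of resource bytes starting at the given offset, or nil at end of data.
static void ServeDataRequestSketch(AVAssetResourceLoadingRequest *loadingRequest,
                                   NSData *(^readChunk)(long long offset))
{
    AVAssetResourceLoadingDataRequest *dataRequest = loadingRequest.dataRequest;
    long long remaining = dataRequest.requestsAllDataToEndOfResource
        ? LLONG_MAX // disregard requestedLength in this case, per the notes above
        : (long long)dataRequest.requestedLength;

    NSData *chunk;
    while (remaining > 0 && (chunk = readChunk(dataRequest.currentOffset)) != nil) {
        if ((long long)chunk.length > remaining) {
            chunk = [chunk subdataWithRange:NSMakeRange(0, (NSUInteger)remaining)];
        }
        [dataRequest respondWithData:chunk]; // currentOffset advances by chunk.length
        remaining -= (long long)chunk.length;
    }
    [loadingRequest finishLoading]; // or -finishLoadingWithError: on failure
}
// --- end sketch ---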
@interface AVAssetResourceLoader (AVAssetResourceLoaderContentKeySupport)
/*!
@property preloadsEligibleContentKeys
@abstract When YES, eligible content keys will be loaded as eagerly as possible, potentially handled by the delegate. Setting to YES may result in network activity.
@discussion Any work done as a result of setting this property will be performed asynchronously.
*/
@property (nonatomic) BOOL preloadsEligibleContentKeys NS_AVAILABLE(10_11, 9_0);
@end
@interface AVAssetResourceLoadingRequest (AVAssetResourceLoadingRequestContentKeyRequestSupport)
/*!
@method streamingContentKeyRequestDataForApp:contentIdentifier:options:error:
@abstract Obtains a streaming content key request for a specific combination of application and content.
@param appIdentifier
An opaque identifier for the application. The value of this identifier depends on the particular system used to provide the decryption key.
@param contentIdentifier
An opaque identifier for the content. The value of this identifier depends on the particular system used to provide the decryption key.
@param options
Additional information necessary to obtain the key, or nil if none.
@param error
If obtaining the streaming content key request fails, will be set to an instance of NSError describing the failure.
@result The key request data that must be transmitted to the key vendor to obtain the content key.
*/
- (nullable NSData *)streamingContentKeyRequestDataForApp:(NSData *)appIdentifier contentIdentifier:(NSData *)contentIdentifier options:(nullable NSDictionary<NSString *, id> *)options error:(NSError * _Nullable * _Nullable)outError;
/*!
@method persistentContentKeyFromKeyVendorResponse:options:error:
@abstract Obtains a persistable content key from a context.
@param keyVendorResponse
The response returned from the key vendor as a result of a request generated from streamingContentKeyRequestDataForApp:contentIdentifier:options:error:.
@param options
Additional information necessary to obtain the persistable content key, or nil if none.
@param error
If obtaining the persistable content key fails, will be set to an instance of NSError describing the failure.
@result The persistable content key data that may be stored offline to answer future loading requests of the same content key.
@discussion The data returned from this method may be used to immediately satisfy an AVAssetResourceLoadingDataRequest, as well as any subsequent requests for the same key URL. The value of AVAssetResourceLoadingContentInformationRequest.contentType must be set to AVStreamingKeyDeliveryPersistentContentKeyType when responding with data created with this method.
*/
- (nullable NSData *)persistentContentKeyFromKeyVendorResponse:(NSData *)keyVendorResponse options:(nullable NSDictionary<NSString *, id> *)options error:(NSError **)outError NS_AVAILABLE_IOS(9_0);
@end
// Options keys for use with -[AVAssetResourceLoadingRequest streamingContentKeyRequestDataForApp:contentIdentifier:options:error:]
/*!
@constant AVAssetResourceLoadingRequestStreamingContentKeyRequestRequiresPersistentKey
@abstract Specifies whether the content key request should require a persistable key to be returned from the key vendor. Value should be a NSNumber created with +[NSNumber numberWithBool:].
*/
AVF_EXPORT NSString *const AVAssetResourceLoadingRequestStreamingContentKeyRequestRequiresPersistentKey NS_AVAILABLE_IOS(9_0);
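// --- Editor's illustrative sketch (not part of the original header) ---
// Producing and persisting a content key inside a resource loader delegate
// callback. `appCertificate`, `contentID`, and `sendToKeyServer` are all
// hypothetical stand-ins for your key system's real inputs and round trip.
static void HandleContentKeyRequestSketch(AVAssetResourceLoadingRequest *loadingRequest,
                                          NSData *appCertificate,
                                          NSData *contentID,
                                          NSData *(^sendToKeyServer)(NSData *requestData))
{
    NSError *error = nil;
    NSData *keyRequestData =
        [loadingRequest streamingContentKeyRequestDataForApp:appCertificate
                                           contentIdentifier:contentID
                                                     options:@{ AVAssetResourceLoadingRequestStreamingContentKeyRequestRequiresPersistentKey : @YES }
                                                       error:&error];
    if (!keyRequestData) {
        [loadingRequest finishLoadingWithError:error];
        return;
    }
    NSData *vendorResponse = sendToKeyServer(keyRequestData); // hypothetical round trip
    NSData *persistentKey = [loadingRequest persistentContentKeyFromKeyVendorResponse:vendorResponse
                                                                              options:nil
                                                                                error:&error];
    if (!persistentKey) {
        [loadingRequest finishLoadingWithError:error];
        return;
    }
    // Respond with the persistable key, tagging the content type as required above.
    loadingRequest.contentInformationRequest.contentType = AVStreamingKeyDeliveryPersistentContentKeyType;
    [loadingRequest.dataRequest respondWithData:persistentKey];
    [loadingRequest finishLoading];
}
// --- end sketch ---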
@interface AVAssetResourceLoadingRequest (AVAssetResourceLoadingRequestDeprecated)
/*!
@method finishLoadingWithResponse:data:redirect:
@abstract Causes the receiver to finish loading a resource that a delegate has previously assumed responsibility for loading by returning YES as the result of -resourceLoader:shouldWaitForLoadingOfRequestedResource:.
@param response
The NSURLResponse for the NSURLRequest of the receiver. Should be nil if no response is required.
@param data
An instance of NSData containing the data of the resource. Should be nil if no such data is available.
@param redirect
An instance of NSURLRequest indicating a redirect of the loading request. Should be nil if no redirect is needed.
@discussion This method is deprecated. Use the following methods instead.
-[AVAssetResourceLoadingRequest setResponse:] to set the response property,
-[AVAssetResourceLoadingRequest setRedirect:] to set the redirect property,
-[AVAssetResourceLoadingDataRequest respondWithData:] to provide data, and
-[AVAssetResourceLoadingRequest finishLoading] to indicate that loading is finished.
*/
- (void)finishLoadingWithResponse:(nullable NSURLResponse *)response data:(nullable NSData *)data redirect:(nullable NSURLRequest *)redirect NS_DEPRECATED_IOS(6_0, 7_0);
@end
NS_ASSUME_NONNULL_END

View file

@@ -1,380 +0,0 @@
/*
File: AVAssetTrack.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
/*!
@class AVAssetTrack
@abstract An AVAssetTrack object provides the track-level inspection interface for all assets.
@discussion
AVAssetTrack adopts the AVAsynchronousKeyValueLoading protocol. Methods in the protocol should be used to access a track's properties without blocking the current thread. To cancel load requests for all keys of AVAssetTrack, one must message the parent AVAsset object (for example, [track.asset cancelLoading]).
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVAsynchronousKeyValueLoading.h>
#import <AVFoundation/AVAsset.h>
#import <AVFoundation/AVAssetTrackSegment.h>
#import <AVFoundation/AVMediaFormat.h>
#import <AVFoundation/AVMetadataFormat.h>
#import <CoreMedia/CMTimeRange.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAssetTrackInternal;
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAssetTrack : NSObject <NSCopying, AVAsynchronousKeyValueLoading>
{
@private
AVAssetTrackInternal *_track;
}
AV_INIT_UNAVAILABLE
/* provides a reference to the AVAsset of which the AVAssetTrack is a part */
@property (nonatomic, readonly, weak) AVAsset *asset;
/* indicates the persistent unique identifier for this track of the asset */
@property (nonatomic, readonly) CMPersistentTrackID trackID;
/* Note that cancellation of loading requests for all keys of AVAssetTrack must be made on the parent AVAsset, e.g. [track.asset cancelLoading] */
@end
@interface AVAssetTrack (AVAssetTrackBasicPropertiesAndCharacteristics)
/* indicates the media type for this track, e.g. AVMediaTypeVideo, AVMediaTypeAudio, etc., as defined in AVMediaFormat.h. */
@property (nonatomic, readonly) AVMediaType mediaType;
/* provides an array of CMFormatDescriptions
each of which indicates the format of media samples referenced by the track;
a track that presents uniform media, e.g. encoded according to the same encoding settings,
will provide an array with a count of 1 */
@property (nonatomic, readonly) NSArray *formatDescriptions;
/* Indicates whether the receiver is playable in the current environment; if YES, an AVPlayerItemTrack of an AVPlayerItem initialized with the receiver's asset can be enabled for playback. */
@property (nonatomic, readonly, getter=isPlayable) BOOL playable NS_AVAILABLE(10_8, 5_0);
/* Indicates whether the receiver is decodable in the current environment; if YES, the track can be decoded even though decoding may be too slow for real time playback. */
@property (nonatomic, readonly, getter=isDecodable) BOOL decodable NS_AVAILABLE(10_13, 11_0);
/* indicates whether the track is enabled according to state stored in its container or construct;
note that its presentation state can be changed from this default via AVPlayerItemTrack */
@property (nonatomic, readonly, getter=isEnabled) BOOL enabled;
/* indicates whether the track references sample data only within its storage container */
@property (nonatomic, readonly, getter=isSelfContained) BOOL selfContained;
/* indicates the total number of bytes of sample data required by the track */
@property (nonatomic, readonly) long long totalSampleDataLength;
/*!
@method hasMediaCharacteristic:
@abstract Reports whether the track references media with the specified media characteristic.
@param mediaCharacteristic
The media characteristic of interest, e.g. AVMediaCharacteristicVisual, AVMediaCharacteristicAudible, AVMediaCharacteristicLegible, etc.,
as defined in AVMediaFormat.h.
@result YES if the track references media with the specified characteristic, otherwise NO.
*/
- (BOOL)hasMediaCharacteristic:(AVMediaCharacteristic)mediaCharacteristic;
@end
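// --- Editor's illustrative sketch (not part of the original header) ---
// Loading a track's basic properties asynchronously before reading them, per
// the AVAsynchronousKeyValueLoading note above. The logged keys are real
// AVAssetTrack properties; the track itself is assumed to come from elsewhere.
static void InspectTrackSketch(AVAssetTrack *track)
{
    [track loadValuesAsynchronouslyForKeys:@[@"mediaType", @"totalSampleDataLength"]
                         completionHandler:^{
        NSError *error = nil;
        if ([track statusOfValueForKey:@"mediaType" error:&error] == AVKeyValueStatusLoaded) {
            NSLog(@"media type: %@, self-contained: %d, bytes: %lld",
                  track.mediaType, track.selfContained, track.totalSampleDataLength);
            if ([track hasMediaCharacteristic:AVMediaCharacteristicAudible]) {
                NSLog(@"track carries audible media");
            }
        }
    }];
}
// --- end sketch ---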
@interface AVAssetTrack (AVAssetTrackTemporalProperties)
/* Indicates the timeRange of the track within the overall timeline of the asset;
a track with CMTIME_COMPARE_INLINE(timeRange.start, >, kCMTimeZero) will initially present an empty interval. */
@property (nonatomic, readonly) CMTimeRange timeRange;
/* indicates a timescale in which time values for the track can be operated upon without extraneous numerical conversion */
@property (nonatomic, readonly) CMTimeScale naturalTimeScale;
/* indicates the estimated data rate of the media data referenced by the track, in units of bits per second */
@property (nonatomic, readonly) float estimatedDataRate;
@end
@interface AVAssetTrack (AVAssetTrackLanguageProperties)
/* indicates the language associated with the track, as an ISO 639-2/T language code;
may be nil if no language is indicated */
@property (nonatomic, readonly, nullable) NSString *languageCode;
/* indicates the language tag associated with the track, as an IETF BCP 47 (RFC 4646) language identifier;
may be nil if no language tag is indicated */
@property (nonatomic, readonly, nullable) NSString *extendedLanguageTag;
@end
@interface AVAssetTrack (AVAssetTrackPropertiesForVisualCharacteristic)
/* indicates the natural dimensions of the media data referenced by the track as a CGSize */
@property (nonatomic, readonly) CGSize naturalSize;
/* indicates the transform specified in the track's storage container as the preferred transformation of the visual media data for display purposes;
its value is often but not always CGAffineTransformIdentity */
@property (nonatomic, readonly) CGAffineTransform preferredTransform;
@end
@interface AVAssetTrack (AVAssetTrackPropertiesForAudibleCharacteristic)
/* indicates the volume specified in the track's storage container as the preferred volume of the audible media data */
@property (nonatomic, readonly) float preferredVolume;
@end
@interface AVAssetTrack (AVAssetTrackPropertiesForFrameBasedCharacteristic)
/*!
@property nominalFrameRate
@abstract For tracks that carry a full frame per media sample, indicates the frame rate of the track in units of frames per second.
@discussion For field-based video tracks that carry one field per media sample, the value of this property is the field rate, not the frame rate.
*/
@property (nonatomic, readonly) float nominalFrameRate;
/* indicates the minimum duration of the track's frames; the value will be kCMTimeInvalid if the minimum frame duration is not known or cannot be calculated */
@property (nonatomic, readonly) CMTime minFrameDuration NS_AVAILABLE(10_10, 7_0);
/*!
@property requiresFrameReordering
@abstract Indicates whether samples in the track may have different values for their presentation and decode timestamps.
*/
@property (nonatomic, readonly) BOOL requiresFrameReordering NS_AVAILABLE(10_10, 8_0);
@end
@interface AVAssetTrack (AVAssetTrackSegments)
/* Provides an array of AVAssetTrackSegments with time mappings from the timeline of the track's media samples to the timeline of the track.
Empty edits, i.e. timeRanges for which no media data is available to be presented, have a value of AVAssetTrackSegment.empty equal to YES. */
@property (nonatomic, copy, readonly) NSArray<AVAssetTrackSegment *> *segments;
/*!
@method segmentForTrackTime:
@abstract Supplies the AVAssetTrackSegment from the segments array with a target timeRange that either contains the specified track time or is the closest to it among the target timeRanges of the track's segments.
@param trackTime
The trackTime for which an AVAssetTrackSegment is requested.
@result An AVAssetTrackSegment.
@discussion If the trackTime does not map to a sample presentation time (e.g. it's outside the track's timeRange), the segment closest in time to the specified trackTime is returned.
*/
- (nullable AVAssetTrackSegment *)segmentForTrackTime:(CMTime)trackTime;
/*!
@method samplePresentationTimeForTrackTime:
@abstract Maps the specified trackTime through the appropriate time mapping and returns the resulting sample presentation time.
@param trackTime
The trackTime for which a sample presentation time is requested.
@result A CMTime; will be invalid if the trackTime is out of range.
*/
- (CMTime)samplePresentationTimeForTrackTime:(CMTime)trackTime;
@end
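// --- Editor's illustrative sketch (not part of the original header) ---
// Mapping a track time through the track's segments, as described above.
// The three-second query time is an arbitrary example value.
static void MapTrackTimeSketch(AVAssetTrack *track)
{
    CMTime trackTime = CMTimeMake(3, 1); // three seconds into the track's timeline
    AVAssetTrackSegment *segment = [track segmentForTrackTime:trackTime];
    if (segment && !segment.empty) {
        CMTime presentationTime = [track samplePresentationTimeForTrackTime:trackTime];
        if (CMTIME_IS_VALID(presentationTime)) {
            NSLog(@"track time 3s maps to sample presentation time %f",
                  CMTimeGetSeconds(presentationTime));
        }
    }
}
// --- end sketch ---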
@interface AVAssetTrack (AVAssetTrackMetadataReading)
// high-level access to selected metadata of common interest
/* provides access to an array of AVMetadataItems for each common metadata key for which a value is available */
@property (nonatomic, readonly) NSArray<AVMetadataItem *> *commonMetadata;
/* Provides access to an array of AVMetadataItems for all metadata identifiers for which a value is available; items can be filtered according to language via +[AVMetadataItem metadataItemsFromArray:filteredAndSortedAccordingToPreferredLanguages:] and according to identifier via +[AVMetadataItem metadataItemsFromArray:filteredByIdentifier:].
*/
@property (nonatomic, readonly) NSArray<AVMetadataItem *> *metadata NS_AVAILABLE(10_10, 8_0);
/* provides an NSArray of NSStrings, each representing a format of metadata that's available for the track (e.g. QuickTime userdata, etc.)
Metadata formats are defined in AVMetadataItem.h. */
@property (nonatomic, readonly) NSArray<AVMetadataFormat> *availableMetadataFormats;
/*!
@method metadataForFormat:
@abstract Provides an NSArray of AVMetadataItems, one for each metadata item in the container of the specified format.
@param format
The metadata format for which items are requested.
@result An NSArray containing AVMetadataItems.
@discussion Becomes callable without blocking when the key @"availableMetadataFormats" has been loaded.
*/
- (NSArray<AVMetadataItem *> *)metadataForFormat:(AVMetadataFormat)format;
@end
@interface AVAssetTrack (AVAssetTrackTrackAssociations)
/*!
@typedef AVTrackAssociationType
@abstract
The type of a track association.
*/
typedef NSString * AVTrackAssociationType NS_STRING_ENUM;
/*
@constant AVTrackAssociationTypeAudioFallback
@abstract Indicates an association of an audio track with another audio track that contains the same content but is typically encoded in a different format that's more widely supported, used to nominate a track that should be used in place of an unsupported track.
@discussion
Associations of type AVTrackAssociationTypeAudioFallback are supported only between audio tracks. This association is not symmetric; when used with -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:], the receiver should be an instance of AVAssetWriterInput with a corresponding track that has content that's less widely supported, and the input parameter should be an instance of AVAssetWriterInput with a corresponding track that has content that's more widely supported.
Example: Using AVTrackAssociationTypeAudioFallback, a stereo audio track with media subtype kAudioFormatMPEG4AAC could be nominated as the "fallback" for an audio track encoding the same source material but with media subtype kAudioFormatAC3 and a 5.1 channel layout. This would ensure that all clients are capable of playing back some form of the audio.
*/
AVF_EXPORT AVTrackAssociationType const AVTrackAssociationTypeAudioFallback NS_AVAILABLE(10_9, 7_0);
/*
@constant AVTrackAssociationTypeChapterList
@abstract Indicates an association of a track with another track that contains chapter information. The track containing chapter information may be a text track, a video track, or a timed metadata track.
@discussion
This association is not symmetric; when used with -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:], the receiver should be an instance of AVAssetWriterInput with a corresponding track that has renderable content while the input parameter should be an instance of AVAssetWriterInput with a corresponding track that contains chapter metadata.
*/
AVF_EXPORT AVTrackAssociationType const AVTrackAssociationTypeChapterList NS_AVAILABLE(10_9, 7_0);
/*
@constant AVTrackAssociationTypeForcedSubtitlesOnly
@abstract Indicates an association of a subtitle track, typically containing both forced and non-forced subtitles, with another subtitle track that contains only forced subtitles, for use when the user indicates that only essential subtitles should be displayed. When such an association is established, the forced subtitles in both tracks are expected to present the same content in the same language but may have different timing.
@discussion
Associations of type AVTrackAssociationTypeForcedSubtitlesOnly are supported only between subtitle tracks. This association is not symmetric; when used with -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:], the receiver should be an instance of AVAssetWriterInput with a corresponding subtitle track that contains non-forced subtitles, and the input parameter should be an instance of AVAssetWriterInput with a corresponding subtitle track that contains forced subtitles only.
*/
AVF_EXPORT AVTrackAssociationType const AVTrackAssociationTypeForcedSubtitlesOnly NS_AVAILABLE(10_9, 7_0);
/*
@constant AVTrackAssociationTypeSelectionFollower
@abstract Indicates an association between a pair of tracks that specifies that, when the first of the pair is selected, the second of the pair should be considered an appropriate default for selection also. Example: a subtitle track in the same language as an audio track may be associated with that audio track using AVTrackAssociationTypeSelectionFollower, to indicate that selection of the subtitle track, in the absence of a directive for subtitle selection from the user, can "follow" the selection of the audio track.
@discussion
This association is not symmetric; when used with -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:], the input parameter should be an instance of AVAssetWriterInput whose selection may depend on the selection of the receiver. In the example above, the receiver would be the instance of AVAssetWriterInput corresponding with the audio track and the input parameter would be the instance of AVAssetWriterInput corresponding with the subtitle track.
*/
AVF_EXPORT AVTrackAssociationType const AVTrackAssociationTypeSelectionFollower NS_AVAILABLE(10_9, 7_0);
/*
@constant AVTrackAssociationTypeTimecode
@abstract Indicates an association of a track with another track that contains timecode information. The track containing timecode information should be a timecode track.
@discussion
This association is not symmetric; when used with -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:], the receiver should be an instance of AVAssetWriterInput with a corresponding track that may be a video track or an audio track while the input parameter should be an instance of AVAssetWriterInput with a corresponding timecode track.
*/
AVF_EXPORT AVTrackAssociationType const AVTrackAssociationTypeTimecode NS_AVAILABLE(10_9, 7_0);
/*
@constant AVTrackAssociationTypeMetadataReferent
@abstract Indicates an association between a metadata track and the track that's described or annotated via the contents of the metadata track.
@discussion
This track association is optional for AVAssetTracks with the mediaType AVMediaTypeMetadata. When a metadata track lacks this track association, its contents are assumed to describe or annotate the asset as a whole.
This association is not symmetric; when used with -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:], the receiver should be an instance of AVAssetWriterInput with mediaType AVMediaTypeMetadata while the input parameter should be an instance of AVAssetWriterInput that's used to create the track to which the contents of the receiver's corresponding metadata track refer.
*/
AVF_EXPORT AVTrackAssociationType const AVTrackAssociationTypeMetadataReferent NS_AVAILABLE(10_10, 8_0);
/* Provides an NSArray of NSStrings, each representing a type of track association that the receiver has with one or more of the other tracks of the asset (e.g. AVTrackAssociationTypeChapterList, AVTrackAssociationTypeTimecode, etc.).
Track association types are defined immediately above. */
@property (nonatomic, readonly) NSArray<AVTrackAssociationType> *availableTrackAssociationTypes NS_AVAILABLE(10_9, 7_0);
/*!
@method associatedTracksOfType:
@abstract Provides an NSArray of AVAssetTracks, one for each track associated with the receiver with the specified type of track association.
@param trackAssociationType
The type of track association for which associated tracks are requested.
@result An NSArray containing AVAssetTracks; may be empty if there are no associated tracks of the specified type.
@discussion Becomes callable without blocking when the key @"availableTrackAssociationTypes" has been loaded.
*/
- (NSArray<AVAssetTrack *> *)associatedTracksOfType:(AVTrackAssociationType)trackAssociationType NS_AVAILABLE(10_9, 7_0);
@end
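// --- Editor's illustrative sketch (not part of the original header) ---
// Discovering fallback audio tracks through track associations, usable without
// blocking once @"availableTrackAssociationTypes" is loaded (see above).
static void LogAudioFallbacksSketch(AVAssetTrack *audioTrack)
{
    if ([audioTrack.availableTrackAssociationTypes containsObject:AVTrackAssociationTypeAudioFallback]) {
        NSArray<AVAssetTrack *> *fallbacks =
            [audioTrack associatedTracksOfType:AVTrackAssociationTypeAudioFallback];
        for (AVAssetTrack *fallback in fallbacks) {
            NSLog(@"track %d falls back to track %d", audioTrack.trackID, fallback.trackID);
        }
    }
}
// --- end sketch ---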
#if !TARGET_OS_IPHONE
@class AVSampleCursor;
@interface AVAssetTrack (AVAssetTrackSampleCursorProvision)
/* Indicates whether the receiver can provide instances of AVSampleCursor for traversing its media samples and discovering information about them. */
@property (nonatomic, readonly) BOOL canProvideSampleCursors NS_AVAILABLE_MAC(10_10);
/*!
@method makeSampleCursorWithPresentationTimeStamp:
@abstract Creates an instance of AVSampleCursor and positions it at or near the specified presentation timestamp.
@param presentationTimeStamp
The desired initial presentation timestamp of the returned AVSampleCursor.
@result An instance of AVSampleCursor.
@discussion If the receiver's asset has a value of YES for providesPreciseDurationAndTiming, the sample cursor will be accurately positioned at the receiver's last media sample with presentation timestamp less than or equal to the desired timestamp, or, if there are no such samples, the first sample in presentation order.
If the receiver's asset has a value of NO for providesPreciseDurationAndTiming, and it is prohibitively expensive to locate the precise sample at the desired timestamp, the sample cursor may be approximately positioned.
*/
- (nullable AVSampleCursor *)makeSampleCursorWithPresentationTimeStamp:(CMTime)presentationTimeStamp NS_AVAILABLE_MAC(10_10);
/*!
@method makeSampleCursorAtFirstSampleInDecodeOrder
@abstract Creates an instance of AVSampleCursor and positions it at the receiver's first media sample in decode order.
@result An instance of AVSampleCursor.
*/
- (nullable AVSampleCursor *)makeSampleCursorAtFirstSampleInDecodeOrder NS_AVAILABLE_MAC(10_10);
/*!
@method makeSampleCursorAtLastSampleInDecodeOrder
@abstract Creates an instance of AVSampleCursor and positions it at the receiver's last media sample in decode order.
@result An instance of AVSampleCursor.
*/
- (nullable AVSampleCursor *)makeSampleCursorAtLastSampleInDecodeOrder NS_AVAILABLE_MAC(10_10);
@end
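/*
 Example (an illustrative sketch, macOS only): creating a cursor near the 10-second
 mark. "track" is an assumed AVAssetTrack; positioning is exact only when the parent
 asset's providesPreciseDurationAndTiming is YES, per the discussion above.

 if (track.canProvideSampleCursors) {
     AVSampleCursor *cursor = [track makeSampleCursorWithPresentationTimeStamp:CMTimeMake(10, 1)];
     if (cursor) {
         CMTimeShow(cursor.presentationTimeStamp); // logs the cursor's actual position
     }
 }
*/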
#endif // !TARGET_OS_IPHONE
#pragma mark --- AVAssetTrack change notifications ---
/*
AVAssetTrack change notifications are posted by instances of mutable subclasses, AVMutableCompositionTrack and AVMutableMovieTrack.
Some of the notifications are also posted by instances of dynamic subclasses, AVFragmentedAssetTrack and AVFragmentedMovieTrack, but these are capable of changing only in well-defined ways and only under specific conditions that you control.
*/
/*!
@constant AVAssetTrackTimeRangeDidChangeNotification
@abstract Posted when the timeRange of an AVFragmentedAssetTrack changes while the associated instance of AVFragmentedAsset is being minded by an AVFragmentedAssetMinder, but only for changes that occur after the status of the value of @"timeRange" has reached AVKeyValueStatusLoaded.
*/
AVF_EXPORT NSString *const AVAssetTrackTimeRangeDidChangeNotification NS_AVAILABLE(10_11, 9_0);
/*!
@constant AVAssetTrackSegmentsDidChangeNotification
@abstract Posted when the array of segments of an AVFragmentedAssetTrack changes while the associated instance of AVFragmentedAsset is being minded by an AVFragmentedAssetMinder, but only for changes that occur after the status of the value of @"segments" has reached AVKeyValueStatusLoaded.
*/
AVF_EXPORT NSString *const AVAssetTrackSegmentsDidChangeNotification NS_AVAILABLE(10_11, 9_0);
/*!
@constant AVAssetTrackTrackAssociationsDidChangeNotification
@abstract Posted when the collection of track associations of an AVAssetTrack changes, but only for changes that occur after the status of the value of @"availableTrackAssociationTypes" has reached AVKeyValueStatusLoaded.
*/
AVF_EXPORT NSString *const AVAssetTrackTrackAssociationsDidChangeNotification NS_AVAILABLE(10_11, 9_0);
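/*
 Example (a brief, non-normative sketch): observing segment changes on a fragmented
 track. "fragmentedTrack" is an assumed AVFragmentedAssetTrack whose parent asset is
 being minded by an AVFragmentedAssetMinder.

 id token = [[NSNotificationCenter defaultCenter] addObserverForName:AVAssetTrackSegmentsDidChangeNotification
                                                              object:fragmentedTrack
                                                               queue:[NSOperationQueue mainQueue]
                                                          usingBlock:^(NSNotification *note) {
     NSLog(@"segments changed for track %@", note.object);
 }];
 // Keep "token" in order to call -removeObserver: later.
*/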
#pragma mark --- AVFragmentedAssetTrack ---
/*!
@class AVFragmentedAssetTrack
@abstract A subclass of AVAssetTrack for handling tracks of fragmented assets. An AVFragmentedAssetTrack is capable of changing the values of certain of its properties, if its parent asset is associated with an instance of AVFragmentedAssetMinder when one or more fragments are appended to the underlying media resource.
@discussion While its parent asset is associated with an AVFragmentedAssetMinder, AVFragmentedAssetTrack posts AVAssetTrackTimeRangeDidChangeNotification and AVAssetTrackSegmentsDidChangeNotification whenever new fragments are detected, as appropriate.
*/
@class AVFragmentedAssetTrackInternal;
NS_CLASS_AVAILABLE_MAC(10_11)
@interface AVFragmentedAssetTrack : AVAssetTrack
{
@private
AVFragmentedAssetTrackInternal *_fragmentedAssetTrack __attribute__((unused));
}
@end
NS_ASSUME_NONNULL_END

@ -1,50 +0,0 @@
/*
File: AVAssetTrackGroup.h
Framework: AVFoundation
Copyright 2010-2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@class AVAssetTrackGroupInternal;
/*!
@class AVAssetTrackGroup
@abstract
A class whose instances describe a group of tracks in an asset.
@discussion
Instances of AVAssetTrackGroup describe a single group of related tracks in an asset. For example, a track group can
describe a set of alternate tracks, which are tracks containing variations of the same content, such as content
translated into different languages, out of which only one track should be played at a time.
Clients can inspect the track groups contained in an AVAsset by loading and obtaining the value of its trackGroups property.
*/
NS_CLASS_AVAILABLE(10_9, 7_0)
@interface AVAssetTrackGroup : NSObject <NSCopying>
{
@private
AVAssetTrackGroupInternal *_assetTrackGroup;
}
/*!
@property trackIDs
@abstract
The IDs of all of the tracks in the group.
@discussion
The value of this property is an NSArray of NSNumbers interpreted as CMPersistentTrackIDs, one for each track in the
group.
*/
@property (nonatomic, readonly) NSArray<NSNumber *> *trackIDs;
@end
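/*
 Example (a minimal sketch): resolving a group's trackIDs back to AVAssetTrack
 objects. "asset" is an assumed AVAsset whose @"trackGroups" key has been loaded.

 for (AVAssetTrackGroup *group in asset.trackGroups) {
     for (NSNumber *trackID in group.trackIDs) {
         AVAssetTrack *track = [asset trackWithTrackID:(CMPersistentTrackID)trackID.intValue];
         NSLog(@"alternate in group: %@", track);
     }
 }
*/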
NS_ASSUME_NONNULL_END

@ -1,39 +0,0 @@
/*
File: AVAssetTrackSegment.h
Framework: AVFoundation
Copyright 2010-2015 Apple Inc. All rights reserved.
*/
/*!
@class AVAssetTrackSegment
@abstract AVAssetTrackSegment represents a segment of an AVAssetTrack, comprising a
time mapping from the source to the asset track timeline.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMTimeRange.h>
NS_ASSUME_NONNULL_BEGIN
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAssetTrackSegment : NSObject
{
@private
CMTimeMapping _timeMapping;
}
AV_INIT_UNAVAILABLE
/* indicates the mapping from the time range of the media in the container file to the time range of the track that the AVAssetTrackSegment represents */
@property (nonatomic, readonly) CMTimeMapping timeMapping;
/* indicates whether the AVAssetTrackSegment is an empty segment */
@property (nonatomic, readonly, getter=isEmpty) BOOL empty;
@end
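/*
 Example (an illustrative sketch): walking a track's segments. "track" is an assumed
 AVAssetTrack whose @"segments" key has been loaded; each segment's timeMapping maps
 a source time range onto the track's timeline, and empty segments represent empty edits.

 for (AVAssetTrackSegment *segment in track.segments) {
     if (segment.isEmpty) {
         NSLog(@"empty edit, duration %f s", CMTimeGetSeconds(segment.timeMapping.target.duration));
     } else {
         NSLog(@"media at %f s plays at track time %f s",
               CMTimeGetSeconds(segment.timeMapping.source.start),
               CMTimeGetSeconds(segment.timeMapping.target.start));
     }
 }
*/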
NS_ASSUME_NONNULL_END

@ -1,522 +0,0 @@
/*
File: AVAssetWriter.h
Framework: AVFoundation
Copyright 2010-2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVMediaFormat.h>
#import <AVFoundation/AVMediaSelectionGroup.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMBase.h>
#import <CoreMedia/CMTime.h>
#import <CoreMedia/CMSampleBuffer.h>
@class AVAssetWriterInput;
@class AVMetadataItem;
NS_ASSUME_NONNULL_BEGIN
/*!
@enum AVAssetWriterStatus
@abstract
These constants are returned by the AVAssetWriter status property to indicate whether it can successfully write samples to its output file.
@constant AVAssetWriterStatusUnknown
Indicates that the status of the asset writer is not currently known.
@constant AVAssetWriterStatusWriting
Indicates that the asset writer is successfully writing samples to its output file.
@constant AVAssetWriterStatusCompleted
Indicates that the asset writer has successfully written all samples following a call to finishWriting.
@constant AVAssetWriterStatusFailed
Indicates that the asset writer can no longer write samples to its output file because of an error. The error is described by the value of the asset writer's error property.
@constant AVAssetWriterStatusCancelled
Indicates that the asset writer can no longer write samples because writing was canceled with the cancelWriting method.
*/
typedef NS_ENUM(NSInteger, AVAssetWriterStatus) {
AVAssetWriterStatusUnknown = 0,
AVAssetWriterStatusWriting,
AVAssetWriterStatusCompleted,
AVAssetWriterStatusFailed,
AVAssetWriterStatusCancelled
};
@class AVAssetWriterInternal;
/*!
@class AVAssetWriter
@abstract
AVAssetWriter provides services for writing media data to a new file.
@discussion
Instances of AVAssetWriter can write media to new files in formats such as the QuickTime movie file format or the MPEG-4 file format. AVAssetWriter has support for automatic interleaving of media data for multiple concurrent tracks. Source media data can be obtained from instances of AVAssetReader for one or more assets or from other sources outside of AVFoundation.
Instances of AVAssetWriter can re-encode media samples as they are written. Instances of AVAssetWriter can also optionally write metadata collections to the output file.
A single instance of AVAssetWriter can be used once to write to a single file. Clients that wish to write to files multiple times must use a new instance of AVAssetWriter each time.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetWriter : NSObject
{
@private
AVAssetWriterInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetWriterWithURL:fileType:error:
@abstract
Returns an instance of AVAssetWriter configured to write to a file in a specified container format.
@param outputURL
The location of the file to be written. The URL must be a file URL.
@param outputFileType
A UTI indicating the format of the file to be written.
@param outError
On return, if initialization of the AVAssetWriter fails, points to an NSError describing the nature of the failure.
@result
An instance of AVAssetWriter.
@discussion
Writing will fail if a file already exists at the specified URL.
UTIs for container formats that can be written are declared in AVMediaFormat.h.
*/
+ (nullable instancetype)assetWriterWithURL:(NSURL *)outputURL fileType:(AVFileType)outputFileType error:(NSError * _Nullable * _Nullable)outError;
/*!
@method initWithURL:fileType:error:
@abstract
Creates an instance of AVAssetWriter configured to write to a file in a specified container format.
@param outputURL
The location of the file to be written. The URL must be a file URL.
@param outputFileType
A UTI indicating the format of the file to be written.
@param outError
On return, if initialization of the AVAssetWriter fails, points to an NSError describing the nature of the failure.
@result
An instance of AVAssetWriter.
@discussion
Writing will fail if a file already exists at the specified URL.
UTIs for container formats that can be written are declared in AVMediaFormat.h.
*/
- (nullable instancetype)initWithURL:(NSURL *)outputURL fileType:(AVFileType)outputFileType error:(NSError * _Nullable * _Nullable)outError NS_DESIGNATED_INITIALIZER;
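/*
 Example (a minimal creation sketch; the output path is hypothetical). Note that a
 file must not already exist at the URL, per the discussion above.

 NSError *error = nil;
 NSURL *outputURL = [NSURL fileURLWithPath:@"/tmp/output.mov"];
 AVAssetWriter *writer = [[AVAssetWriter alloc] initWithURL:outputURL
                                                   fileType:AVFileTypeQuickTimeMovie
                                                      error:&error];
 if (writer == nil) {
     NSLog(@"could not create asset writer: %@", error);
 }
*/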
/*!
@property outputURL
@abstract
The location of the file for which the instance of AVAssetWriter was initialized for writing.
@discussion
You may use UTTypeCopyPreferredTagWithClass(outputFileType, kUTTagClassFilenameExtension) to obtain an appropriate path extension for the outputFileType you have specified. For more information about UTTypeCopyPreferredTagWithClass and kUTTagClassFilenameExtension, on iOS see <MobileCoreServices/UTType.h> and on Mac OS X see <LaunchServices/UTType.h>.
*/
@property (nonatomic, copy, readonly) NSURL *outputURL;
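/*
 Example (a hedged sketch of the extension lookup described above; requires
 MobileCoreServices on iOS or CoreServices on macOS):

 NSString *ext = CFBridgingRelease(UTTypeCopyPreferredTagWithClass(
     (__bridge CFStringRef)AVFileTypeQuickTimeMovie, kUTTagClassFilenameExtension));
 // ext is typically @"mov" for AVFileTypeQuickTimeMovie
*/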
/*!
@property outputFileType
@abstract
The UTI of the file format of the file for which the instance of AVAssetWriter was initialized for writing.
*/
@property (nonatomic, copy, readonly) AVFileType outputFileType;
/*!
@property availableMediaTypes
@abstract
The media types for which inputs can be added to the receiver.
@discussion
Some media types may not be accepted within the file format with which an AVAssetWriter was initialized.
*/
@property (nonatomic, readonly) NSArray<AVMediaType> *availableMediaTypes;
/*!
@property status
@abstract
The status of writing samples to the receiver's output file.
@discussion
The value of this property is an AVAssetWriterStatus that indicates whether writing is in progress, has completed successfully, has been canceled, or has failed. Clients of AVAssetWriterInput objects should check the value of this property after appending samples fails to determine why no more samples could be written. This property is thread safe.
*/
@property (readonly) AVAssetWriterStatus status;
/*!
@property error
@abstract
If the receiver's status is AVAssetWriterStatusFailed, this describes the error that caused the failure.
@discussion
The value of this property is an NSError that describes what caused the receiver to no longer be able to write to its output file. If the receiver's status is not AVAssetWriterStatusFailed, the value of this property is nil. This property is thread safe.
*/
@property (readonly, nullable) NSError *error;
/*!
@property metadata
@abstract
A collection of metadata to be written to the receiver's output file.
@discussion
The value of this property is an array of AVMetadataItem objects representing the collection of top-level metadata to be written in the output file.
This property cannot be set after writing has started.
*/
@property (nonatomic, copy) NSArray<AVMetadataItem *> *metadata;
/*!
@property shouldOptimizeForNetworkUse
@abstract
Specifies whether the output file should be written in a way that makes it more suitable for playback over a network.
@discussion
When the value of this property is YES, the output file will be written in such a way that playback can start after only a small amount of the file is downloaded.
This property cannot be set after writing has started.
*/
@property (nonatomic) BOOL shouldOptimizeForNetworkUse;
/*!
@property directoryForTemporaryFiles
@abstract
Specifies a directory that is suitable for containing temporary files generated during the process of writing an asset.
@discussion
AVAssetWriter may need to write temporary files when configured in certain ways, such as when performsMultiPassEncodingIfSupported is set to YES on one or more of its inputs. This property can be used to control where in the filesystem those temporary files are created. All temporary files will be deleted when asset writing is completed, is canceled, or fails.
When the value of this property is nil, the asset writer will choose a suitable location when writing temporary files. The default value is nil.
This property cannot be set after writing has started. The asset writer will fail if a file cannot be created in this directory (for example, due to insufficient permissions).
*/
@property (nonatomic, copy, nullable) NSURL *directoryForTemporaryFiles NS_AVAILABLE(10_10, 8_0);
/*!
@property inputs
@abstract
The inputs from which the asset writer receives media data.
@discussion
The value of this property is an NSArray containing concrete instances of AVAssetWriterInput. Inputs can be added to the receiver using the addInput: method.
*/
@property (nonatomic, readonly) NSArray<AVAssetWriterInput *> *inputs;
/*!
@method canApplyOutputSettings:forMediaType:
@abstract
Tests whether output settings for a specific media type are supported by the receiver's file format.
@param outputSettings
The output settings that are to be tested.
@param mediaType
The media type for which the output settings are to be tested. Media types are defined in AVMediaFormat.h.
@result
A BOOL indicating whether the given output settings can be used for the given media type.
@discussion
This method determines whether the output settings for the specified media type can be used with the receiver's file format. For example, video compression settings that specify H.264 compression are not compatible with file formats that cannot contain H.264-compressed video.
Attempting to add an input with output settings and a media type for which this method returns NO will cause an exception to be thrown.
*/
- (BOOL)canApplyOutputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings forMediaType:(AVMediaType)mediaType;
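/*
 Example (an illustrative sketch; the settings values are arbitrary): validating video
 output settings before creating and adding an input. "writer" is an assumed AVAssetWriter.

 NSDictionary<NSString *, id> *videoSettings = @{ AVVideoCodecKey  : AVVideoCodecTypeH264,
                                                  AVVideoWidthKey  : @1280,
                                                  AVVideoHeightKey : @720 };
 if ([writer canApplyOutputSettings:videoSettings forMediaType:AVMediaTypeVideo]) {
     AVAssetWriterInput *videoInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                                         outputSettings:videoSettings];
     if ([writer canAddInput:videoInput]) {
         [writer addInput:videoInput];
     }
 }
*/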
/*!
@method canAddInput:
@abstract
Tests whether an input can be added to the receiver.
@param input
The AVAssetWriterInput object to be tested.
@result
A BOOL indicating whether the input can be added to the receiver.
@discussion
An input that accepts media data of a type that is not compatible with the receiver, or with output settings that are not compatible with the receiver, cannot be added.
*/
- (BOOL)canAddInput:(AVAssetWriterInput *)input;
/*!
@method addInput:
@abstract
Adds an input to the receiver.
@param input
The AVAssetWriterInput object to be added.
@discussion
Inputs are created with a media type and output settings. These both must be compatible with the receiver.
Inputs cannot be added after writing has started.
*/
- (void)addInput:(AVAssetWriterInput *)input;
/*!
@method startWriting
@abstract
Prepares the receiver for accepting input and for writing its output to its output file.
@result
A BOOL indicating whether writing successfully started.
@discussion
This method must be called after all inputs have been added and other configuration properties have been set in order to tell the receiver to prepare for writing. After this method is called, clients can start writing sessions using startSessionAtSourceTime: and can write media samples using the methods provided by each of the receiver's inputs.
If writing cannot be started, this method returns NO. Clients can check the values of the status and error properties for more information on why writing could not be started.
On iOS, if the status of an AVAssetWriter is AVAssetWriterStatusWriting when the client app goes into the background, its status will change to AVAssetWriterStatusFailed and appending to any of its inputs will fail. You may want to use -[UIApplication beginBackgroundTaskWithExpirationHandler:] to avoid being interrupted in the middle of a writing session and to finish writing the data that has already been appended. For more information about executing code in the background, see the iOS Application Programming Guide.
*/
- (BOOL)startWriting;
/*!
@method startSessionAtSourceTime:
@abstract
Initiates a sample-writing session for the receiver.
@param startTime
The starting asset time for the sample-writing session, in the timeline of the source samples.
@discussion
Sequences of sample data appended to the asset writer inputs are considered to fall within "sample-writing sessions", initiated with this method. Accordingly, this method must be called after writing has started (using -startWriting) but before any sample data is appended to the receiver's inputs.
Each writing session has a start time which, where allowed by the file format being written, defines the mapping from the timeline of source samples to the timeline of the written file. In the case of the QuickTime movie file format, the first session begins at movie time 0, so a sample appended with timestamp T will be played at movie time (T-startTime). Samples with timestamps earlier than startTime will still be added to the output file but will be edited out (i.e. not presented during playback). If the earliest appended sample for an input has a timestamp later than startTime, an empty edit will be inserted to preserve synchronization between tracks of the output asset.
To end the session started by use of this method, use -endSessionAtSourceTime: or -finishWritingWithCompletionHandler:. It is an error to invoke -startSessionAtSourceTime: twice in a row without invoking -endSessionAtSourceTime: in between.
NOTE: Multiple sample-writing sessions are currently not supported. It is an error to call -startSessionAtSourceTime: a second time after calling -endSessionAtSourceTime:.
*/
- (void)startSessionAtSourceTime:(CMTime)startTime;
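/*
 Example (a short sketch of the call ordering described above): start writing, then
 open the session before appending any samples. "writer" is an assumed, configured
 AVAssetWriter.

 if ([writer startWriting]) {
     [writer startSessionAtSourceTime:kCMTimeZero];
     // ... append samples to the writer's inputs, then end the session or finish writing ...
 } else {
     NSLog(@"could not start writing: %@", writer.error);
 }
*/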
/*!
@method endSessionAtSourceTime:
@abstract
Concludes a sample-writing session.
@param endTime
The ending asset time for the sample-writing session, in the timeline of the source samples.
@discussion
Call this method to complete a session started with -startSessionAtSourceTime:.
The endTime defines the moment on the timeline of source samples at which the session ends. In the case of the QuickTime movie file format, each sample-writing session's startTime...endTime pair corresponds to a period of movie time into which the session's samples are inserted. Samples with timestamps that are later than the session end time will still be added to the written file but will be edited out (i.e. not presented during playback). So if the first session has duration D1 = endTime - startTime, it will be inserted into the written file at time 0 through D1; the second session would be inserted into the written file at time D1 through D1+D2, etc. It is legal to have a session with no samples; this will cause creation of an empty edit of the prescribed duration.
It is not mandatory to call -endSessionAtSourceTime:; if -finishWritingWithCompletionHandler: is called without first invoking -endSessionAtSourceTime:, the session's effective end time will be the latest end timestamp of the session's appended samples (i.e. no samples will be edited out at the end).
It is an error to append samples outside of a sample-writing session. To append more samples after invoking -endSessionAtSourceTime:, you must first start a new session using -startSessionAtSourceTime:.
NOTE: Multiple sample-writing sessions are currently not supported. It is an error to call -startSessionAtSourceTime: a second time after calling -endSessionAtSourceTime:.
*/
- (void)endSessionAtSourceTime:(CMTime)endTime;
/*!
@method cancelWriting
@abstract
Cancels the creation of the output file.
@discussion
If the status of the receiver is "failed" or "completed," -cancelWriting is a no-op. Otherwise, this method will block until writing is canceled.
If an output file was created by the receiver during the writing process, -cancelWriting will delete the file.
This method should not be called concurrently with -[AVAssetWriterInput appendSampleBuffer:] or -[AVAssetWriterInputPixelBufferAdaptor appendPixelBuffer:withPresentationTime:].
*/
- (void)cancelWriting;
/*!
@method finishWriting
@abstract
Completes the writing of the output file.
@result
A BOOL indicating whether writing successfully finished.
@discussion
This method is deprecated. Use finishWritingWithCompletionHandler: instead.
This method will block until writing is finished. When this method returns successfully, the file being written by the receiver is complete and ready to use.
Because this method is blocking and can take a long time to execute (especially with shouldOptimizeForNetworkUse set to YES), it should not be called from the main thread. Doing so can cause the finishWriting operation to fail.
If writing cannot be finished, this method returns NO. Clients can check the values of the status and error properties for more information on why writing could not be finished.
This method should not be called concurrently with -[AVAssetWriterInput appendSampleBuffer:] or -[AVAssetWriterInputPixelBufferAdaptor appendPixelBuffer:withPresentationTime:].
*/
- (BOOL)finishWriting NS_DEPRECATED(10_7, 10_9, 4_1, 6_0);
/*!
@method finishWritingWithCompletionHandler:
@abstract
Marks all unfinished inputs as finished and completes the writing of the output file.
@discussion
This method returns immediately and causes its work to be performed asynchronously.
When the writing of the output file is finished, or if a failure or a cancellation occurs in the meantime, the specified handler will be invoked to indicate completion of the operation. To determine whether the operation succeeded, your handler can check the value of AVAssetWriter.status. If the status is AVAssetWriterStatusFailed, AVAssetWriter.error will contain an instance of NSError that describes the failure.
To guarantee that all sample buffers are successfully written, ensure all calls to -[AVAssetWriterInput appendSampleBuffer:] or -[AVAssetWriterInputPixelBufferAdaptor appendPixelBuffer:withPresentationTime:] have returned before invoking this method.
*/
- (void)finishWritingWithCompletionHandler:(void (^)(void))handler NS_AVAILABLE(10_9, 6_0);
@end
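/*
 Example (a condensed, non-normative sketch of the full lifecycle documented above):
 "writer", "input", and -copyNextSampleBufferToWrite are assumed to exist; see
 -requestMediaDataWhenReadyOnQueue:usingBlock: in AVAssetWriterInput.h.

 dispatch_queue_t writerQueue = dispatch_queue_create("com.example.assetwriter", DISPATCH_QUEUE_SERIAL);
 [writer startWriting];
 [writer startSessionAtSourceTime:kCMTimeZero];
 [input requestMediaDataWhenReadyOnQueue:writerQueue usingBlock:^{
     while (input.isReadyForMoreMediaData) {
         CMSampleBufferRef buffer = [self copyNextSampleBufferToWrite];
         if (buffer == NULL) {
             [input markAsFinished];
             [writer finishWritingWithCompletionHandler:^{
                 if (writer.status == AVAssetWriterStatusFailed) {
                     NSLog(@"writing failed: %@", writer.error);
                 }
             }];
             break;
         }
         [input appendSampleBuffer:buffer];
         CFRelease(buffer);
     }
 }];
*/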
@interface AVAssetWriter (AVAssetWriterFileTypeSpecificProperties)
/*!
@property movieFragmentInterval
@abstract
For file types that support movie fragments, specifies the frequency at which movie fragments should be written.
@discussion
When movie fragments are used, a partially written asset whose writing is unexpectedly interrupted can be successfully opened and played up to multiples of the specified time interval. The default value of this property is kCMTimeInvalid, which indicates that movie fragments should not be used.
This property cannot be set after writing has started.
*/
@property (nonatomic) CMTime movieFragmentInterval;
/*!
@property overallDurationHint
@abstract
For file types that support movie fragments, provides a hint of the final duration of the file to be written.
@discussion
The value of this property must be a nonnegative, numeric CMTime. Alternatively, if the value of this property is an invalid CMTime (e.g. kCMTimeInvalid), no overall duration hint will be written to the file. The default value is kCMTimeInvalid.
This property is currently ignored if movie fragments are not being written. Use the movieFragmentInterval property to enable movie fragments.
This property cannot be set after writing has started.
*/
@property (nonatomic) CMTime overallDurationHint;
/*!
@property movieTimeScale
@abstract
For file types that contain a 'moov' atom, such as QuickTime Movie files, specifies the asset-level time scale to be used.
@discussion
The default value is 0, which indicates that the receiver should choose a convenient value, if applicable.
This property cannot be set after writing has started.
*/
@property (nonatomic) CMTimeScale movieTimeScale NS_AVAILABLE(10_7, 4_3);
@end
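/*
 Example (an illustrative configuration; the two-second interval is an arbitrary
 value). Both properties must be set before -startWriting is called. "writer" is an
 assumed AVAssetWriter targeting a QuickTime movie file.

 writer.movieFragmentInterval = CMTimeMake(2, 1); // write a movie fragment every 2 seconds
 writer.movieTimeScale = 600;                     // a conventional QuickTime movie time scale
*/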
@class AVAssetWriterInputGroup;
@interface AVAssetWriter (AVAssetWriterInputGroups)
/*!
@method canAddInputGroup:
@abstract
Tests whether an input group can be added to the receiver.
@param inputGroup
The AVAssetWriterInputGroup object to be tested.
@result
A BOOL indicating whether the input group can be added to the receiver.
@discussion
If outputFileType specifies a container format that does not support mutually exclusive relationships among tracks, or if the specified instance of AVAssetWriterInputGroup contains inputs with media types that cannot be related, the group cannot be added to the AVAssetWriter.
*/
- (BOOL)canAddInputGroup:(AVAssetWriterInputGroup *)inputGroup NS_AVAILABLE(10_9, 7_0);
/*
@method addInputGroup:
@abstract
Adds an instance of AVAssetWriterInputGroup to the AVAssetWriter. The AVAssetWriter will mark the tracks associated with grouped inputs as mutually exclusive to each other for playback or other processing, if the output container format supports mutually exclusive relationships among tracks.
@param inputGroup
The collection of AVAssetWriterInputs to be grouped together.
@discussion
When an input group is added to an AVAssetWriter, the value of marksOutputTrackAsEnabled will automatically be set to YES for the default input and set to NO for all of the other inputs in the group.
Input groups cannot be added after writing has started.
*/
- (void)addInputGroup:(AVAssetWriterInputGroup *)inputGroup NS_AVAILABLE(10_9, 7_0);
/*!
@property inputGroups
@abstract
The instances of AVAssetWriterInputGroup that have been added to the AVAssetWriter.
@discussion
The value of this property is an NSArray containing concrete instances of AVAssetWriterInputGroup. Input groups can be added to the receiver using the addInputGroup: method.
*/
@property (nonatomic, readonly) NSArray<AVAssetWriterInputGroup *> *inputGroups NS_AVAILABLE(10_9, 7_0);
@end
@class AVAssetWriterInputGroupInternal;
/*
@class AVAssetWriterInputGroup
@abstract Associates tracks corresponding to inputs with each other in a mutually exclusive relationship.
@discussion
This class is used to associate tracks corresponding to multiple AVAssetWriterInputs as mutually exclusive to each other for playback or other processing. For example, if you are creating an asset with multiple audio tracks using different spoken languages, only one of which should be played at a time, group the inputs corresponding to those tracks into a single instance of AVAssetWriterInputGroup and add the group to the AVAssetWriter via -[AVAssetWriter addInputGroup:]. If the output format supports mutually exclusive relationships among tracks, the AVAssetWriter will mark the tracks as mutually exclusive to each other.
Note that because AVAssetWriterInputGroup is a subclass of AVMediaSelectionGroup, clients can examine the media selection options that will be available on the output asset before the asset is written. Best results for examining the options of the AVAssetWriterInputGroup will be obtained after associating the AVAssetWriterInputs of the AVAsset as appropriate via -[AVAssetWriterInput addTrackAssociationWithTrackOfInput:type:] and by initializing each AVAssetWriterInput with a source format hint, where appropriate.
*/
NS_CLASS_AVAILABLE(10_9, 7_0)
@interface AVAssetWriterInputGroup : AVMediaSelectionGroup
{
@private
AVAssetWriterInputGroupInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*
@method assetWriterInputGroupWithInputs:defaultInput:
@abstract
Creates an instance of AVAssetWriterInputGroup, for use with -[AVAssetWriter addInputGroup:].
@param inputs
The collection of AVAssetWriterInputs to be grouped together.
@param defaultInput
The instance of AVAssetWriterInput in the group to designate as the default. When the input group is added to an AVAssetWriter via -addInputGroup:, the value of marksOutputTrackAsEnabled will automatically be set to YES for the default input and set to NO for all of the other inputs in the group.
@result
An instance of AVAssetWriterInputGroup, for use with -[AVAssetWriter addInputGroup:].
*/
+ (instancetype)assetWriterInputGroupWithInputs:(NSArray<AVAssetWriterInput *> *)inputs defaultInput:(nullable AVAssetWriterInput *)defaultInput;
/*
@method initWithInputs:defaultInput:
@abstract
Creates an instance of AVAssetWriterInputGroup, for use with -[AVAssetWriter addInputGroup:].
@param inputs
The collection of AVAssetWriterInputs to be grouped together.
@param defaultInput
The instance of AVAssetWriterInput in the group to designate as the default. When the input group is added to an AVAssetWriter via -addInputGroup:, the value of marksOutputTrackAsEnabled will automatically be set to YES for the default input and set to NO for all of the other inputs in the group.
@result
An instance of AVAssetWriterInputGroup, for use with -[AVAssetWriter addInputGroup:].
*/
- (instancetype)initWithInputs:(NSArray<AVAssetWriterInput *> *)inputs defaultInput:(nullable AVAssetWriterInput *)defaultInput NS_DESIGNATED_INITIALIZER;
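/*
 Example (a hedged sketch): grouping two alternate-language audio inputs so only one
 plays at a time. "englishInput", "frenchInput", and "writer" are assumed to exist
 and to be mutually compatible.

 AVAssetWriterInputGroup *group =
     [AVAssetWriterInputGroup assetWriterInputGroupWithInputs:@[ englishInput, frenchInput ]
                                                 defaultInput:englishInput];
 if ([writer canAddInputGroup:group]) {
     [writer addInputGroup:group]; // englishInput's track will be marked enabled
 }
*/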
/*!
@property inputs
@abstract
The inputs grouped together by the receiver.
@discussion
The value of this property is an NSArray containing concrete instances of AVAssetWriterInput.
*/
@property (nonatomic, readonly) NSArray<AVAssetWriterInput *> *inputs;
/*!
@property defaultInput
@abstract
The input designated as the defaultInput of the receiver.
@discussion
The value of this property is a concrete instance of AVAssetWriterInput.
*/
@property (nonatomic, readonly, nullable) AVAssetWriterInput *defaultInput;
@end
NS_ASSUME_NONNULL_END

@ -1,861 +0,0 @@
/*
File: AVAssetWriterInput.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVMediaFormat.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMTime.h>
#import <CoreMedia/CMTimeRange.h>
#import <CoreMedia/CMSampleBuffer.h>
#import <CoreVideo/CVPixelBuffer.h>
#import <CoreMedia/CMFormatDescription.h>
@class AVMetadataItem;
@class AVAssetWriterInputInternal;
NS_ASSUME_NONNULL_BEGIN
/*!
@class AVAssetWriterInput
@abstract
AVAssetWriterInput defines an interface for appending either new media samples or references to existing media samples packaged as CMSampleBuffer objects to a single track of the output file of an AVAssetWriter.
@discussion
Clients that need to write multiple concurrent tracks of media data should use one AVAssetWriterInput instance per track. In order to write multiple concurrent tracks with ideal interleaving of media data, clients should observe the value returned by the readyForMoreMediaData property of each AVAssetWriterInput instance.
AVAssetWriterInput also supports writing per-track metadata collections to the output file.
As of OS X 10.10 and iOS 8.0 AVAssetWriterInput can also be used to create tracks that are not self-contained. Such tracks reference sample data that is located in another file. This is currently supported only for instances of AVAssetWriterInput attached to an instance of AVAssetWriter that writes files of type AVFileTypeQuickTimeMovie.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetWriterInput : NSObject
{
@private
AVAssetWriterInputInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetWriterInputWithMediaType:outputSettings:
@abstract
Creates a new input of the specified media type to receive sample buffers for writing to the output file.
@param mediaType
The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
@param outputSettings
The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
@result
An instance of AVAssetWriterInput.
@discussion
Each new input accepts data for a new track of the AVAssetWriter's output file. Inputs are added to an asset writer using -[AVAssetWriter addInput:].
Passing nil for output settings instructs the input to pass through appended samples, doing no processing before they are written to the output file. This is useful if, for example, you are appending buffers that are already in a desirable compressed format. However, if not writing to a QuickTime Movie file (i.e. the AVAssetWriter was initialized with a file type other than AVFileTypeQuickTimeMovie), AVAssetWriter only supports passing through a restricted set of media types and subtypes. In order to pass through media data to files other than AVFileTypeQuickTimeMovie, a non-NULL format hint must be provided using +assetWriterInputWithMediaType:outputSettings:sourceFormatHint: instead of this method.
For AVMediaTypeAudio, the following keys are not currently supported in the outputSettings dictionary: AVEncoderAudioQualityKey and AVSampleRateConverterAudioQualityKey. When using this method to construct a new instance, an audio settings dictionary must be fully specified, meaning that it must contain AVFormatIDKey, AVSampleRateKey, and AVNumberOfChannelsKey. If no other channel layout information is available, a value of 1 for AVNumberOfChannelsKey will result in mono output and a value of 2 will result in stereo output. If AVNumberOfChannelsKey specifies a channel count greater than 2, the dictionary must also specify a value for AVChannelLayoutKey. For kAudioFormatLinearPCM, all relevant AVLinearPCM*Key keys must be included, and for kAudioFormatAppleLossless, the AVEncoderBitDepthHintKey key must be included. See +assetWriterInputWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys.
For AVMediaTypeVideo, any output settings dictionary must request a compressed video format. This means that the value passed in for outputSettings must follow the rules for compressed video output, as laid out in AVVideoSettings.h. When using this method to construct a new instance, a video settings dictionary must be fully specified, meaning that it must contain AVVideoCodecKey, AVVideoWidthKey, and AVVideoHeightKey. See +assetWriterInputWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys. On iOS, the only values currently supported for AVVideoCodecKey are AVVideoCodecTypeH264 and AVVideoCodecTypeJPEG. AVVideoCodecTypeH264 is not supported on iPhone 3G. For AVVideoScalingModeKey, the value AVVideoScalingModeFit is not supported.
*/
+ (instancetype)assetWriterInputWithMediaType:(AVMediaType)mediaType outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings;
/*!
@method assetWriterInputWithMediaType:outputSettings:sourceFormatHint:
@abstract
Creates a new input of the specified media type to receive sample buffers for writing to the output file.
@param mediaType
The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
@param outputSettings
The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
@param sourceFormatHint
A hint about the format of media data that will be appended to the new input.
@result
An instance of AVAssetWriterInput.
@discussion
A version of +assetWriterInputWithMediaType:outputSettings: that includes the ability to hint at the format of media data that will be appended to the new instance of AVAssetWriterInput. When a source format hint is provided, the outputSettings dictionary is not required to be fully specified. For AVMediaTypeAudio, this means that AVFormatIDKey is the only required key. For AVMediaTypeVideo, this means that AVVideoCodecKey is the only required key. Values for the remaining keys will be chosen by the asset writer input, with consideration given to the attributes of the source format. To guarantee successful file writing, clients who specify a format hint should ensure that subsequently-appended buffers are of the specified format.
An NSInvalidArgumentException will be thrown if the media type of the format description does not match the media type string passed into this method.
*/
+ (instancetype)assetWriterInputWithMediaType:(AVMediaType)mediaType outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings sourceFormatHint:(nullable CMFormatDescriptionRef)sourceFormatHint NS_AVAILABLE(10_8, 6_0);
/*!
@method initWithMediaType:outputSettings:
@abstract
Creates a new input of the specified media type to receive sample buffers for writing to the output file.
@param mediaType
The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
@param outputSettings
The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
@result
An instance of AVAssetWriterInput.
@discussion
Each new input accepts data for a new track of the AVAssetWriter's output file. Inputs are added to an asset writer using -[AVAssetWriter addInput:].
Passing nil for output settings instructs the input to pass through appended samples, doing no processing before they are written to the output file. This is useful if, for example, you are appending buffers that are already in a desirable compressed format. However, if not writing to a QuickTime Movie file (i.e. the AVAssetWriter was initialized with a file type other than AVFileTypeQuickTimeMovie), AVAssetWriter only supports passing through a restricted set of media types and subtypes. In order to pass through media data to files other than AVFileTypeQuickTimeMovie, a non-NULL format hint must be provided using -initWithMediaType:outputSettings:sourceFormatHint: instead of this method.
For AVMediaTypeAudio, the following keys are not currently supported in the outputSettings dictionary: AVEncoderAudioQualityKey and AVSampleRateConverterAudioQualityKey. When using this initializer, an audio settings dictionary must be fully specified, meaning that it must contain AVFormatIDKey, AVSampleRateKey, and AVNumberOfChannelsKey. If no other channel layout information is available, a value of 1 for AVNumberOfChannelsKey will result in mono output and a value of 2 will result in stereo output. If AVNumberOfChannelsKey specifies a channel count greater than 2, the dictionary must also specify a value for AVChannelLayoutKey. For kAudioFormatLinearPCM, all relevant AVLinearPCM*Key keys must be included, and for kAudioFormatAppleLossless, the AVEncoderBitDepthHintKey key must be included. See -initWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys.
For AVMediaTypeVideo, any output settings dictionary must request a compressed video format. This means that the value passed in for outputSettings must follow the rules for compressed video output, as laid out in AVVideoSettings.h. When using this initializer, a video settings dictionary must be fully specified, meaning that it must contain AVVideoCodecKey, AVVideoWidthKey, and AVVideoHeightKey. See -initWithMediaType:outputSettings:sourceFormatHint: for a way to avoid having to specify a value for each of those keys. On iOS, the only values currently supported for AVVideoCodecKey are AVVideoCodecTypeH264 and AVVideoCodecTypeJPEG. AVVideoCodecTypeH264 is not supported on iPhone 3G. For AVVideoScalingModeKey, the value AVVideoScalingModeFit is not supported.
*/
- (instancetype)initWithMediaType:(AVMediaType)mediaType outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings;
/*!
@method initWithMediaType:outputSettings:sourceFormatHint:
@abstract
Creates a new input of the specified media type to receive sample buffers for writing to the output file. This is the designated initializer of AVAssetWriterInput.
@param mediaType
The media type of samples that will be accepted by the input. Media types are defined in AVMediaFormat.h.
@param outputSettings
The settings used for encoding the media appended to the output. See AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo for more information on how to construct an output settings dictionary. If you only require simple preset-based output settings, see AVOutputSettingsAssistant.
@param sourceFormatHint
A hint about the format of media data that will be appended to the new input.
@result
An instance of AVAssetWriterInput.
@discussion
A version of -initWithMediaType:outputSettings: that includes the ability to hint at the format of media data that will be appended to the new instance of AVAssetWriterInput. When a source format hint is provided, the outputSettings dictionary is not required to be fully specified. For AVMediaTypeAudio, this means that AVFormatIDKey is the only required key. For AVMediaTypeVideo, this means that AVVideoCodecKey is the only required key. Values for the remaining keys will be chosen by the asset writer input, with consideration given to the attributes of the source format. To guarantee successful file writing, clients who specify a format hint should ensure that subsequently-appended buffers are of the specified format.
An NSInvalidArgumentException will be thrown if the media type of the format description does not match the media type string passed into this method.
*/
- (instancetype)initWithMediaType:(AVMediaType)mediaType outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings sourceFormatHint:(nullable CMFormatDescriptionRef)sourceFormatHint NS_AVAILABLE(10_8, 6_0) NS_DESIGNATED_INITIALIZER;
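/*
 Example (a brief sketch of pass-through configuration with a format hint): "sourceTrack"
 is an assumed AVAssetTrack whose @"formatDescriptions" key has been loaded.

 CMFormatDescriptionRef hint =
     (__bridge CMFormatDescriptionRef)sourceTrack.formatDescriptions.firstObject;
 AVAssetWriterInput *passthroughInput =
     [[AVAssetWriterInput alloc] initWithMediaType:AVMediaTypeVideo
                                    outputSettings:nil
                                  sourceFormatHint:hint];
*/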
/*!
@property mediaType
@abstract
The media type of the samples that can be appended to the receiver.
@discussion
The value of this property is one of the media type strings defined in AVMediaFormat.h.
*/
@property (nonatomic, readonly) AVMediaType mediaType;
/*!
@property outputSettings
@abstract
The settings used for encoding the media appended to the output.
@discussion
The value of this property is an NSDictionary that contains values for keys as specified by either AVAudioSettings.h for AVMediaTypeAudio or AVVideoSettings.h for AVMediaTypeVideo. A value of nil indicates that the receiver will pass through appended samples, doing no processing before they are written to the output file.
*/
@property (nonatomic, readonly, nullable) NSDictionary<NSString *, id> *outputSettings;
/*!
@property sourceFormatHint
@abstract
The hint given at initialization time about the format of incoming media data.
@discussion
AVAssetWriterInput may be able to use this hint to fill in missing output settings or perform more upfront validation. To guarantee successful file writing, clients who specify a format hint should ensure that subsequently-appended media data are of the specified format.
*/
@property (nonatomic, readonly, nullable) __attribute__((NSObject)) CMFormatDescriptionRef sourceFormatHint NS_AVAILABLE(10_8, 6_0);
/*!
@property metadata
@abstract
A collection of metadata to be written to the track corresponding to the receiver.
@discussion
The value of this property is an array of AVMetadataItem objects representing the collection of track-level metadata to be written in the output file.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic, copy) NSArray<AVMetadataItem *> *metadata;
/*!
@property readyForMoreMediaData
@abstract
Indicates the readiness of the input to accept more media data.
@discussion
When there are multiple inputs, AVAssetWriter tries to write media data in an ideal interleaving pattern for efficiency in storage and playback. Each of its inputs signals its readiness to receive media data for writing according to that pattern via the value of readyForMoreMediaData. You can append media data to an input only while its readyForMoreMediaData property is YES.
Clients writing media data from a non-real-time source, such as an instance of AVAssetReader, should hold off on generating or obtaining more media data to append to an input when the value of readyForMoreMediaData is NO. To help with control of the supply of non-real-time media data, such clients can use -requestMediaDataWhenReadyOnQueue:usingBlock: in order to specify a block that the input should invoke whenever it's ready for more media data to be appended.
Clients writing media data from a real-time source, such as an instance of AVCaptureOutput, should set the input's expectsMediaDataInRealTime property to YES to ensure that the value of readyForMoreMediaData is calculated appropriately. When expectsMediaDataInRealTime is YES, readyForMoreMediaData will become NO only when the input cannot process media samples as quickly as they are being provided by the client. If readyForMoreMediaData becomes NO for a real-time source, the client may need to drop samples or consider reducing the data rate of appended samples.
When the value of canPerformMultiplePasses is YES for any input attached to this input's asset writer, the value for this property may start as NO and/or be NO for long periods of time.
The value of readyForMoreMediaData will often change from NO to YES asynchronously, as previously supplied media data is processed and written to the output. It is possible for all of an AVAssetWriter's AVAssetWriterInputs temporarily to return NO for readyForMoreMediaData.
This property is key value observable. Observers should not assume that they will be notified of changes on a specific thread.
*/
@property (nonatomic, readonly, getter=isReadyForMoreMediaData) BOOL readyForMoreMediaData;
/*!
@property expectsMediaDataInRealTime
@abstract
Indicates whether the input should tailor its processing of media data for real-time sources.
@discussion
Clients appending media data to an input from a real-time source, such as an AVCaptureOutput, should set expectsMediaDataInRealTime to YES. This will ensure that readyForMoreMediaData is calculated appropriately for real-time usage.
For best results, do not set both this property and performsMultiPassEncodingIfSupported to YES.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic) BOOL expectsMediaDataInRealTime;
/*!
@method requestMediaDataWhenReadyOnQueue:usingBlock:
@abstract
Instructs the receiver to invoke a client-supplied block repeatedly, at its convenience, in order to gather media data for writing to the output file.
@param queue
The queue on which the block should be invoked.
@param block
The block the input should invoke to obtain media data.
@discussion
The block should append media data to the input either until the input's readyForMoreMediaData property becomes NO or until there is no more media data to supply (at which point it may choose to mark the input as finished via -markAsFinished). The block should then exit. After the block exits, if the input has not been marked as finished, once the input has processed the media data it has received and becomes ready for more media data again, it will invoke the block again in order to obtain more.
A typical use of this method, with a block that supplies media data to an input while respecting the input's readyForMoreMediaData property, might look like this:
[myAVAssetWriterInput requestMediaDataWhenReadyOnQueue:myInputSerialQueue usingBlock:^{
    while ([myAVAssetWriterInput isReadyForMoreMediaData])
    {
        CMSampleBufferRef nextSampleBuffer = [self copyNextSampleBufferToWrite];
        if (nextSampleBuffer)
        {
            [myAVAssetWriterInput appendSampleBuffer:nextSampleBuffer];
            CFRelease(nextSampleBuffer);
        }
        else
        {
            [myAVAssetWriterInput markAsFinished];
            break;
        }
    }
}];
This method is not recommended for use with a push-style buffer source, such as AVCaptureAudioDataOutput or AVCaptureVideoDataOutput, because such a combination will likely require intermediate queueing of buffers. Instead, this method is better suited to a pull-style buffer source such as AVAssetReaderOutput, as illustrated in the above example.
When using a push-style buffer source, it is generally better to immediately append each buffer to the AVAssetWriterInput, directly via -[AVAssetWriterInput appendSampleBuffer:], as it is received. Using this strategy, it is often possible to avoid having to queue up buffers in between the buffer source and the AVAssetWriterInput. Note that many of these push-style buffer sources also produce buffers in real-time, in which case the client should set expectsMediaDataInRealTime to YES.
Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
*/
- (void)requestMediaDataWhenReadyOnQueue:(dispatch_queue_t)queue usingBlock:(void (^)(void))block;
/*!
@method appendSampleBuffer:
@abstract
Appends samples to the receiver.
@param sampleBuffer
The CMSampleBuffer to be appended.
@result
A BOOL value indicating success of appending the sample buffer. If a result of NO is returned, clients can check the value of AVAssetWriter.status to determine whether the writing operation completed, failed, or was cancelled. If the status is AVAssetWriterStatusFailed, AVAssetWriter.error will contain an instance of NSError that describes the failure.
@discussion
The timing information in the sample buffer, considered relative to the time passed to -[AVAssetWriter startSessionAtSourceTime:], will be used to determine the timing of those samples in the output file.
For track types other than audio tracks, to determine the duration of all samples in the output file other than the very last sample that's appended, the difference between the sample buffer's output DTS and the following sample buffer's output DTS will be used. The duration of the last sample is determined as follows:
1. If a marker sample buffer with kCMSampleBufferAttachmentKey_EndsPreviousSampleDuration is appended following the last media-bearing sample, the difference between the output DTS of the marker sample buffer and the output DTS of the last media-bearing sample will be used.
2. If the marker sample buffer is not provided and if the output duration of the last media-bearing sample is valid, it will be used.
3. If the output duration of the last media-bearing sample is not valid, the duration of the second-to-last sample will be used.
For audio tracks, the properties of each appended sample buffer are used to determine corresponding output durations.
The receiver will retain the CMSampleBuffer until it is done with it, and then release it. Do not modify a CMSampleBuffer or its contents after you have passed it to this method.
If the sample buffer contains audio data and the AVAssetWriterInput was initialized with an outputSettings dictionary then the format must be linear PCM. If the outputSettings dictionary was nil then audio data can be provided in a compressed format, and it will be passed through to the output without any re-compression. Note that advanced formats like AAC will have encoder delay present in their bitstreams. This data is inserted by the encoder and is necessary for proper decoding, but it is not meant to be played back. Clients who provide compressed audio bitstreams must use kCMSampleBufferAttachmentKey_TrimDurationAtStart to mark the encoder delay (generally restricted to the first sample buffer). Packetization can cause there to be extra audio frames in the last packet which are not meant to be played back. These remainder frames should be marked with kCMSampleBufferAttachmentKey_TrimDurationAtEnd. CMSampleBuffers obtained from AVAssetReader will already have the necessary trim attachments. Please see http://developer.apple.com/mac/library/technotes/tn2009/tn2258.html for more information about encoder delay. When attaching trims make sure that the output PTS of the sample buffer is what you expect. For example, if you called -[AVAssetWriter startSessionAtSourceTime:kCMTimeZero] and you want your audio to start at time zero in the output file then make sure that the output PTS of the first non-fully trimmed audio sample buffer is kCMTimeZero.
If the sample buffer contains a CVPixelBuffer then the choice of pixel format will affect the performance and quality of the encode. For optimal performance the format of the pixel buffer should match one of the native formats supported by the selected video encoder. Below are some recommendations:
The H.264 encoder natively supports kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange and kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, which should be used with video and full range input respectively. The JPEG encoder on iOS natively supports kCVPixelFormatType_422YpCbCr8FullRange. For other video codecs on OSX, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when encoding. If you need to work in the RGB domain then kCVPixelFormatType_32BGRA is recommended on iOS and kCVPixelFormatType_32ARGB is recommended on OSX.
Pixel buffers not in a natively supported format will be converted internally prior to encoding when possible. Pixel format conversions within the same range (video or full) are generally faster than conversions between different ranges.
The ProRes encoders can preserve high bit depth sources, supporting up to 12 bits per channel. ProRes 4444 can contain a mathematically lossless alpha channel and performs no chroma subsampling. This makes ProRes 4444 ideal for quality-critical applications. If you are working with 8-bit sources, ProRes is also a good format to use due to its high image quality. Use either of the recommended pixel formats above. Note that RGB pixel formats by definition have 4:4:4 chroma sampling.
If you are working with high bit depth sources the following yuv pixel formats are recommended when encoding to ProRes: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, and kCVPixelFormatType_422YpCbCr10. When working in the RGB domain kCVPixelFormatType_64ARGB is recommended. Scaling and color matching are not currently supported when using AVAssetWriter with any of these high bit depth pixel formats. Please make sure that your track's output settings dictionary specifies the same width and height as the buffers you will be appending. Do not include AVVideoScalingModeKey or AVVideoColorPropertiesKey.
As of OS X 10.10 and iOS 8.0, this method can be used to add sample buffers that reference existing data in a file instead of containing media data to be appended to the file. This can be used to generate tracks that are not self-contained. In order to append such a sample reference to the track create a CMSampleBufferRef with a NULL dataBuffer and dataReady set to true and set the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachments on the sample buffer. Further documentation on how to create such a "sample reference" sample buffer can be found in the description of the kCMSampleBufferAttachmentKey_SampleReferenceURL and kCMSampleBufferAttachmentKey_SampleReferenceByteOffset attachment keys in the CMSampleBuffer documentation.
Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
*/
- (BOOL)appendSampleBuffer:(CMSampleBufferRef)sampleBuffer;
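/*
 Example (a hedged sketch of the trim attachment described above): marking AAC encoder
 delay on a compressed audio buffer before appending. "buffer" and "audioInput" are
 assumed; the 2112-frame priming value is typical for AAC but illustrative here.

 CMTime trim = CMTimeMake(2112, 44100); // encoder delay: 2112 priming frames at 44.1 kHz
 CFDictionaryRef trimDict = CMTimeCopyAsDictionary(trim, kCFAllocatorDefault);
 CMSetAttachment(buffer, kCMSampleBufferAttachmentKey_TrimDurationAtStart,
                 trimDict, kCMAttachmentMode_ShouldPropagate);
 CFRelease(trimDict);
 BOOL ok = [audioInput appendSampleBuffer:buffer]; // on NO, inspect writer.status and writer.error
*/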
/*!
@method markAsFinished
@abstract
Indicates to the AVAssetWriter that no more buffers will be appended to this input.
@discussion
Clients that are monitoring each input's readyForMoreMediaData value must call markAsFinished on an input when they are done appending buffers to it. This is necessary to prevent other inputs from stalling, as they may otherwise wait forever for that input's media data, attempting to complete the ideal interleaving pattern.
After invoking this method from the serial queue passed to -requestMediaDataWhenReadyOnQueue:usingBlock:, the receiver is guaranteed to issue no more invocations of the block passed to that method. The same is true of -respondToEachPassDescriptionOnQueue:usingBlock:.
Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
*/
- (void)markAsFinished;
@end
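/*
 A sketch of the typical pull-model loop that drives appends and finishes an input. `writerInput`
 is assumed to be attached to a started AVAssetWriter, and `readerOutput` (an AVAssetReaderOutput)
 stands in for any sequential media source.
*/
dispatch_queue_t inputQueue = dispatch_queue_create("com.example.writerinput", DISPATCH_QUEUE_SERIAL);
[writerInput requestMediaDataWhenReadyOnQueue:inputQueue usingBlock:^{
    while (writerInput.readyForMoreMediaData) {
        CMSampleBufferRef buffer = [readerOutput copyNextSampleBuffer];
        if (buffer == NULL) {
            [writerInput markAsFinished]; // no more source media; lets other inputs proceed
            break;
        }
        [writerInput appendSampleBuffer:buffer];
        CFRelease(buffer);
    }
}];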
@interface AVAssetWriterInput (AVAssetWriterInputLanguageProperties)
/*!
@property languageCode
@abstract
Indicates the language to associate with the track corresponding to the receiver, as an ISO 639-2/T language code; can be nil.
@discussion
Also see extendedLanguageTag below.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic, copy, nullable) NSString *languageCode NS_AVAILABLE(10_9, 7_0);
/*!
@property extendedLanguageTag
@abstract
Indicates the language tag to associate with the track corresponding to the receiver, as an IETF BCP 47 (RFC 4646) language identifier; can be nil.
@discussion
Extended language tags are normally set only when an ISO 639-2/T language code by itself is ambiguous, as in cases in which media data should be distinguished not only by language but also by the regional dialect in use or the writing system employed.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic, copy, nullable) NSString *extendedLanguageTag NS_AVAILABLE(10_9, 7_0);
@end
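/*
 A short sketch of the two language properties together; `subtitleInput` is an assumed input for
 a subtitle track whose ISO 639-2/T code alone would be ambiguous.
*/
subtitleInput.languageCode = @"zho";            // ISO 639-2/T
subtitleInput.extendedLanguageTag = @"zh-Hant"; // BCP 47 tag pins down the writing system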
@interface AVAssetWriterInput (AVAssetWriterInputPropertiesForVisualCharacteristic)
/*!
@property naturalSize
@abstract
The size specified in the output file as the natural dimensions of the visual media data for display purposes.
@discussion
If the default value, CGSizeZero, is specified, the naturalSize of the track corresponding to the receiver is set according to dimensions indicated by the format descriptions that are ultimately written to the output track.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic) CGSize naturalSize NS_AVAILABLE(10_9, 7_0);
/*!
@property transform
@abstract
The transform specified in the output file as the preferred transformation of the visual media data for display purposes.
@discussion
If no value is specified, the identity transform is used.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic) CGAffineTransform transform;
@end
@interface AVAssetWriterInput (AVAssetWriterInputPropertiesForAudibleCharacteristic)
/*!
@property preferredVolume
@abstract
The preferred volume level to be stored in the output file.
@discussion
The value for this property should typically be in the range of 0.0 to 1.0. The default value is 1.0, which is equivalent to a "normal" volume level for the audio media type. For all other media types the default value is 0.0.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic) float preferredVolume NS_AVAILABLE(10_9, 7_0);
@end
@interface AVAssetWriterInput (AVAssetWriterInputFileTypeSpecificProperties)
/*!
@property marksOutputTrackAsEnabled
@abstract
For file types that support enabled and disabled tracks, such as QuickTime Movie files, specifies whether the track corresponding to the receiver should be enabled by default for playback and processing. The default value is YES.
@discussion
When an input group is added to an AVAssetWriter (see -[AVAssetWriter addInputGroup:]), the value of marksOutputTrackAsEnabled will automatically be set to YES for the default input and set to NO for all of the other inputs in the group. In this case, if a new value is set on this property then an exception will be raised.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic) BOOL marksOutputTrackAsEnabled NS_AVAILABLE(10_9, 7_0);
/*!
@property mediaTimeScale
@abstract
For file types that support media time scales, such as QuickTime Movie files, specifies the media time scale to be used.
@discussion
The default value is 0, which indicates that the receiver should choose a convenient value, if applicable. It is an error to set a value other than 0 if the receiver has media type AVMediaTypeAudio.
This property cannot be set after writing has started.
*/
@property (nonatomic) CMTimeScale mediaTimeScale NS_AVAILABLE(10_7, 4_3);
/*!
@property preferredMediaChunkDuration
@abstract
For file types that support media chunk duration, such as QuickTime Movie files, specifies the duration to be used for each chunk of sample data in the output file.
@discussion
Chunk duration can influence the granularity of the I/O performed when reading a media file, e.g. during playback. A larger chunk duration can result in fewer reads from disk, at the potential expense of a higher memory footprint.
A "chunk" contains one or more samples. The total duration of the samples in a chunk is no greater than this preferred chunk duration, or the duration of a single sample if the sample's duration is greater than this preferred chunk duration.
The default value is kCMTimeInvalid, which means that the receiver will choose an appropriate default value. It is an error to set a chunk duration that is negative or non-numeric.
This property cannot be set after -startWriting has been called on the receiver.
*/
@property (nonatomic) CMTime preferredMediaChunkDuration NS_AVAILABLE(10_10, 8_0);
/*!
@property preferredMediaChunkAlignment
@abstract
For file types that support media chunk alignment, such as QuickTime Movie files, specifies the boundary for media chunk alignment in bytes (e.g. 512).
@discussion
The default value is 0, which means that the receiver will choose an appropriate default value. A value of 1 implies that no padding should be used to achieve a particular chunk alignment. It is an error to set a negative value for chunk alignment.
This property cannot be set after -startWriting has been called on the receiver.
*/
@property (nonatomic) NSInteger preferredMediaChunkAlignment NS_AVAILABLE(10_10, 8_0);
/*!
@property sampleReferenceBaseURL
@abstract
For file types that support writing sample references, such as QuickTime Movie files, specifies the base URL sample references are relative to.
@discussion
If the value of this property can be resolved as an absolute URL, the sample locations written to the file when appending sample references will be relative to this URL. The URL must point to a location that is in a directory that is a parent of the sample reference location.
Usage example:
Setting the sampleReferenceBaseURL property to "file:///User/johnappleseed/Movies/" and appending sample buffers with the kCMSampleBufferAttachmentKey_SampleReferenceURL attachment set to "file:///User/johnappleseed/Movies/data/movie1.mov" will cause the sample reference "data/movie1.mov" to be written to the movie.
If the value of the property cannot be resolved as an absolute URL or if it points to a location that is not in a parent directory of the sample reference location, the location referenced in the sample buffer will be written unmodified.
The default value is nil, which means that the location referenced in the sample buffer will be written unmodified.
This property cannot be set after -startWriting has been called on the receiver.
*/
@property (nonatomic, copy, nullable) NSURL *sampleReferenceBaseURL NS_AVAILABLE(10_10, 8_0);
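/*
 The usage example above, sketched in code. `movieInput` and `sampleRef` are assumed; the paths
 mirror the illustrative ones in the discussion.
*/
movieInput.sampleReferenceBaseURL = [NSURL fileURLWithPath:@"/User/johnappleseed/Movies" isDirectory:YES];
// With the base URL set, this attachment causes the relative location "data/movie1.mov"
// to be written to the movie.
CMSetAttachment(sampleRef, kCMSampleBufferAttachmentKey_SampleReferenceURL,
                (__bridge CFTypeRef)[NSURL fileURLWithPath:@"/User/johnappleseed/Movies/data/movie1.mov"],
                kCMAttachmentMode_ShouldPropagate);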
typedef NSString *AVAssetWriterInputMediaDataLocation NS_STRING_ENUM NS_AVAILABLE(10_13, 11_0);
/*!
@constant AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData
Indicates that the media data should be interleaved with all other media data that uses this constant.
*/
AVF_EXPORT AVAssetWriterInputMediaDataLocation const AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData NS_AVAILABLE(10_13, 11_0);
/*!
@constant AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved
Indicates that the media data should be laid out before all of the media data written with AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData, and not be interleaved.
*/
AVF_EXPORT AVAssetWriterInputMediaDataLocation const AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved NS_AVAILABLE(10_13, 11_0);
/*!
@property mediaDataLocation
@abstract
Specifies where the media data will be laid out and whether the media data will be interleaved as the main media data.
@discussion
If this value is set to AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved, AVAssetWriter tries to write the media data for this track before all the media data for AVAssetWriterInputs with this property set to AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData.
Use of this property is recommended for optimizing tracks that contain a small amount of data that is needed all at once, independent of playback time, such as chapter name tracks and chapter image tracks.
Keep it set to AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData for tracks whose media data is needed only as its presentation time approaches and, when multiple inputs are present that supply media data that will be played concurrently, should be interleaved for optimal access.
For file types that support preloading media data, such as QuickTime Movie files, if this value is set to AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved, AVAssetWriter will write an indication, such as a 'load' atom, that the whole media data should be preloaded.
The default value is AVAssetWriterInputMediaDataLocationInterleavedWithMainMediaData, which means that the receiver will not write the indication and that the media data will be interleaved.
This property cannot be set after -startWriting has been called on the receiver.
*/
@property (nonatomic, copy) AVAssetWriterInputMediaDataLocation mediaDataLocation NS_AVAILABLE(10_13, 11_0);
@end
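/*
 A one-line sketch for the chapter-track case described above; `chapterImageInput` is an assumed
 input carrying a small amount of data that is needed all at once.
*/
chapterImageInput.mediaDataLocation = AVAssetWriterInputMediaDataLocationBeforeMainMediaDataNotInterleaved;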
@interface AVAssetWriterInput (AVAssetWriterInputTrackAssociations)
/*!
@method canAddTrackAssociationWithTrackOfInput:type:
@abstract
Tests whether an association between the tracks corresponding to a pair of inputs is valid.
@param input
The instance of AVAssetWriterInput with a corresponding track to associate with track corresponding with the receiver.
@param trackAssociationType
The type of track association to test. Common track association types, such as AVTrackAssociationTypeTimecode, are defined in AVAssetTrack.h.
@discussion
If the type of association requires tracks of specific media types that don't match the media types of the inputs, or if the output file type does not support track associations, -canAddTrackAssociationWithTrackOfInput:type: will return NO.
*/
- (BOOL)canAddTrackAssociationWithTrackOfInput:(AVAssetWriterInput *)input type:(NSString *)trackAssociationType NS_AVAILABLE(10_9, 7_0);
/*!
@method addTrackAssociationWithTrackOfInput:type:
@abstract
Associates the track corresponding to the specified input with the track corresponding with the receiver.
@param input
The instance of AVAssetWriterInput with a corresponding track to associate with track corresponding to the receiver.
@param trackAssociationType
The type of track association to add. Common track association types, such as AVTrackAssociationTypeTimecode, are defined in AVAssetTrack.h.
@discussion
If the type of association requires tracks of specific media types that don't match the media types of the inputs, or if the output file type does not support track associations, an NSInvalidArgumentException is raised.
Track associations cannot be added after writing on the receiver's AVAssetWriter has started.
*/
- (void)addTrackAssociationWithTrackOfInput:(AVAssetWriterInput *)input type:(NSString *)trackAssociationType NS_AVAILABLE(10_9, 7_0);
@end
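/*
 A sketch of associating a timecode track with a video track; `videoInput` and `timecodeInput`
 are assumed to have been added to the same AVAssetWriter, whose output file type supports
 track associations.
*/
if ([videoInput canAddTrackAssociationWithTrackOfInput:timecodeInput
                                                  type:AVTrackAssociationTypeTimecode]) {
    [videoInput addTrackAssociationWithTrackOfInput:timecodeInput
                                               type:AVTrackAssociationTypeTimecode];
}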
@class AVAssetWriterInputPassDescription;
@interface AVAssetWriterInput (AVAssetWriterInputMultiPass)
/*!
@property performsMultiPassEncodingIfSupported
@abstract
Indicates whether the input should attempt to encode the source media data using multiple passes.
@discussion
The input may be able to achieve higher quality and/or lower data rate by performing multiple passes over the source media. It does this by analyzing the media data that has been appended and re-encoding certain segments with different parameters. In order to do this re-encoding, the media data for these segments must be appended again. See -markCurrentPassAsFinished and the property currentPassDescription for the mechanism by which the input nominates segments for re-appending.
When the value of this property is YES, the value of readyForMoreMediaData for other inputs attached to the same AVAssetWriter may be NO more often and/or for longer periods of time. In particular, the value of readyForMoreMediaData for inputs that do not (or cannot) perform multiple passes may start out as NO after -[AVAssetWriter startWriting] has been called and may not change to YES until after all multi-pass inputs have completed their final pass.
When the value of this property is YES, the input may store data in one or more temporary files before writing compressed samples to the output file. Use the AVAssetWriter property directoryForTemporaryFiles if you need to control the location of temporary file writing.
The default value is NO, meaning that no additional analysis will occur and no segments will be re-encoded. Not all asset writer input configurations (for example, inputs configured with certain media types or to use certain encoders) can benefit from performing multiple passes over the source media. To determine whether the selected encoder can perform multiple passes, query the value of canPerformMultiplePasses after calling -startWriting.
For best results, do not set both this property and expectsMediaDataInRealTime to YES.
This property cannot be set after writing on the receiver's AVAssetWriter has started.
*/
@property (nonatomic) BOOL performsMultiPassEncodingIfSupported NS_AVAILABLE(10_10, 8_0);
/*!
@property canPerformMultiplePasses
@abstract
Indicates whether the input might perform multiple passes over appended media data.
@discussion
When the value for this property is YES, your source for media data should be configured for random access. After appending all of the media data for the current pass (as specified by the currentPassDescription property), call -markCurrentPassAsFinished to start the process of determining whether additional passes are needed. Note that it is still possible in this case for the input to perform only the initial pass, if it determines that there will be no benefit to performing multiple passes.
When the value for this property is NO, your source for media data only needs to support sequential access. In this case, append all of the source media once and call -markAsFinished.
In the default configuration of AVAssetWriterInput, the value for this property will be NO. Currently the only way for this property to become YES is when performsMultiPassEncodingIfSupported has been set to YES. The final value will be available after -startWriting is called, when a specific encoder has been chosen.
This property is key-value observable.
*/
@property (nonatomic, readonly) BOOL canPerformMultiplePasses NS_AVAILABLE(10_10, 8_0);
/*!
@property currentPassDescription
@abstract
Provides an object that describes the requirements, such as source time ranges to append or re-append, for the current pass.
@discussion
If the value of this property is nil, it means there is no request to be fulfilled and -markAsFinished should be called on the asset writer input.
During the first pass, the request will contain a single time range from zero to positive infinity, indicating that all media from the source should be appended. This will also be true when canPerformMultiplePasses is NO, in which case only one pass will be performed.
The value of this property will be nil before -startWriting is called on the attached asset writer. It will transition to an initial non-nil value during the call to -startWriting. After that, the value of this property will change only after a call to -markCurrentPassAsFinished. For an easy way to be notified at the beginning of each pass, see -respondToEachPassDescriptionOnQueue:usingBlock:.
This property is key-value observable. Observers should not assume that they will be notified of changes on a specific thread.
*/
@property (readonly, nullable) AVAssetWriterInputPassDescription *currentPassDescription NS_AVAILABLE(10_10, 8_0);
/*!
@method respondToEachPassDescriptionOnQueue:usingBlock:
@abstract
Instructs the receiver to invoke a client-supplied block whenever a new pass has begun.
@param queue
The queue on which the block should be invoked.
@param block
A block the receiver should invoke whenever a new pass has begun.
@discussion
A typical block passed to this method will perform the following steps:
1. Query the value of the receiver's currentPassDescription property and reconfigure the source of media data (e.g. AVAssetReader) accordingly
2. Call -requestMediaDataWhenReadyOnQueue:usingBlock: to begin appending data for the current pass
3. Exit
When all media data has been appended for the current request, call -markCurrentPassAsFinished to begin the process of determining whether an additional pass is warranted. If an additional pass is warranted, the block passed to this method will be invoked to begin the next pass. If no additional passes are needed, the block passed to this method will be invoked one final time so the client can invoke -markAsFinished in response to the value of currentPassDescription becoming nil.
Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
*/
- (void)respondToEachPassDescriptionOnQueue:(dispatch_queue_t)queue usingBlock:(dispatch_block_t)block NS_AVAILABLE(10_10, 8_0);
/*!
@method markCurrentPassAsFinished
@abstract
Instructs the receiver to analyze the media data that has been appended and determine whether the results could be improved by re-encoding certain segments.
@discussion
When the value of canPerformMultiplePasses is YES, call this method after you have appended all of your media data. After the receiver analyzes whether an additional pass is warranted, the value of currentPassDescription will change (usually asynchronously) to describe how to set up for the next pass. Although it is possible to use key-value observing to determine when the value of currentPassDescription has changed, it is typically more convenient to invoke -respondToEachPassDescriptionOnQueue:usingBlock: in order to start the work for each pass.
After re-appending the media data for all of the time ranges of the new pass, call this method again to determine whether additional segments should be re-appended in another pass.
Calling this method effectively cancels any previous invocation of -requestMediaDataWhenReadyOnQueue:usingBlock:, meaning that -requestMediaDataWhenReadyOnQueue:usingBlock: can be invoked again for each new pass. -respondToEachPassDescriptionOnQueue:usingBlock: provides a convenient way to consolidate these invocations in your code.
After each pass, you have the option of keeping the most recent results by calling -markAsFinished instead of this method. If the value of currentPassDescription is nil at the beginning of a pass, call -markAsFinished to tell the receiver to not expect any further media data.
If the value of canPerformMultiplePasses is NO, the value of currentPassDescription will immediately become nil after calling this method.
Before calling this method, you must ensure that the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer.
*/
- (void)markCurrentPassAsFinished NS_AVAILABLE(10_10, 8_0);
@end
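/*
 A condensed sketch of one way to drive the multi-pass flow described above. `input`, `queue`,
 and `readerOutput` are assumed; the reader output must have had supportsRandomAccess set to
 YES before reading started so that -resetForReadingTimeRanges: is available.
*/
[input respondToEachPassDescriptionOnQueue:queue usingBlock:^{
    AVAssetWriterInputPassDescription *pass = input.currentPassDescription;
    if (pass == nil) {
        [input markAsFinished]; // no further passes are warranted
        return;
    }
    [readerOutput resetForReadingTimeRanges:pass.sourceTimeRanges];
    [input requestMediaDataWhenReadyOnQueue:queue usingBlock:^{
        while (input.readyForMoreMediaData) {
            CMSampleBufferRef buffer = [readerOutput copyNextSampleBuffer];
            if (buffer == NULL) {
                [input markCurrentPassAsFinished]; // ask whether another pass is needed
                break;
            }
            [input appendSampleBuffer:buffer];
            CFRelease(buffer);
        }
    }];
}];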
@class AVAssetWriterInputPassDescriptionInternal;
/*!
@class AVAssetWriterInputPassDescription
@abstract
Defines an interface for querying information about the requirements of the current pass, such as the time ranges of media data to append.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAssetWriterInputPassDescription : NSObject
{
@private
AVAssetWriterInputPassDescriptionInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*!
@property sourceTimeRanges
@abstract
An NSArray of NSValue objects wrapping CMTimeRange structures, each representing one source time range.
@discussion
The value of this property is suitable for using as a parameter for -[AVAssetReaderOutput resetForReadingTimeRanges:].
*/
@property (nonatomic, readonly) NSArray<NSValue *> *sourceTimeRanges;
@end
@class AVAssetWriterInputPixelBufferAdaptorInternal;
/*!
@class AVAssetWriterInputPixelBufferAdaptor
@abstract
Defines an interface for appending video samples packaged as CVPixelBuffer objects to a single AVAssetWriterInput object.
@discussion
Instances of AVAssetWriterInputPixelBufferAdaptor provide a CVPixelBufferPool that can be used to allocate pixel buffers for writing to the output file. Using the provided pixel buffer pool for buffer allocation is typically more efficient than appending pixel buffers allocated using a separate pool.
*/
NS_CLASS_AVAILABLE(10_7, 4_1)
@interface AVAssetWriterInputPixelBufferAdaptor : NSObject
{
@private
AVAssetWriterInputPixelBufferAdaptorInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetWriterInputPixelBufferAdaptorWithAssetWriterInput:sourcePixelBufferAttributes:
@abstract
Creates a new pixel buffer adaptor to receive pixel buffers for writing to the output file.
@param input
An instance of AVAssetWriterInput to which the receiver should append pixel buffers. Currently, only asset writer inputs that accept media data of type AVMediaTypeVideo can be used to initialize a pixel buffer adaptor.
@param sourcePixelBufferAttributes
Specifies the attributes of pixel buffers that will be vended by the input's CVPixelBufferPool.
@result
An instance of AVAssetWriterInputPixelBufferAdaptor.
@discussion
In order to take advantage of the improved efficiency of appending buffers created from the adaptor's pixel buffer pool, clients should specify pixel buffer attributes that most closely accommodate the source format of the video frames being appended.
Pixel buffer attributes keys for the pixel buffer pool are defined in <CoreVideo/CVPixelBuffer.h>. To specify the pixel format type, the pixelBufferAttributes dictionary should contain a value for kCVPixelBufferPixelFormatTypeKey. For example, use [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] for 8-bit-per-channel BGRA. See the discussion under appendPixelBuffer:withPresentationTime: for advice on choosing a pixel format.
Clients that do not need a pixel buffer pool for allocating buffers should set sourcePixelBufferAttributes to nil.
It is an error to initialize an instance of AVAssetWriterInputPixelBufferAdaptor with an asset writer input that is already attached to another instance of AVAssetWriterInputPixelBufferAdaptor.
*/
+ (instancetype)assetWriterInputPixelBufferAdaptorWithAssetWriterInput:(AVAssetWriterInput *)input sourcePixelBufferAttributes:(nullable NSDictionary<NSString *, id> *)sourcePixelBufferAttributes;
/*!
@method initWithAssetWriterInput:sourcePixelBufferAttributes:
@abstract
Creates a new pixel buffer adaptor to receive pixel buffers for writing to the output file.
@param input
An instance of AVAssetWriterInput to which the receiver should append pixel buffers. Currently, only asset writer inputs that accept media data of type AVMediaTypeVideo can be used to initialize a pixel buffer adaptor.
@param sourcePixelBufferAttributes
Specifies the attributes of pixel buffers that will be vended by the input's CVPixelBufferPool.
@result
An instance of AVAssetWriterInputPixelBufferAdaptor.
@discussion
In order to take advantage of the improved efficiency of appending buffers created from the adaptor's pixel buffer pool, clients should specify pixel buffer attributes that most closely accommodate the source format of the video frames being appended.
Pixel buffer attributes keys for the pixel buffer pool are defined in <CoreVideo/CVPixelBuffer.h>. To specify the pixel format type, the pixelBufferAttributes dictionary should contain a value for kCVPixelBufferPixelFormatTypeKey. For example, use [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] for 8-bit-per-channel BGRA. See the discussion under appendPixelBuffer:withPresentationTime: for advice on choosing a pixel format.
Clients that do not need a pixel buffer pool for allocating buffers should set sourcePixelBufferAttributes to nil.
It is an error to initialize an instance of AVAssetWriterInputPixelBufferAdaptor with an asset writer input that is already attached to another instance of AVAssetWriterInputPixelBufferAdaptor. It is also an error to initialize an instance of AVAssetWriterInputPixelBufferAdaptor with an asset writer input whose asset writer has progressed beyond AVAssetWriterStatusUnknown.
*/
- (instancetype)initWithAssetWriterInput:(AVAssetWriterInput *)input sourcePixelBufferAttributes:(nullable NSDictionary<NSString *, id> *)sourcePixelBufferAttributes NS_DESIGNATED_INITIALIZER;
/*!
@property assetWriterInput
@abstract
The asset writer input to which the receiver should append pixel buffers.
*/
@property (nonatomic, readonly) AVAssetWriterInput *assetWriterInput;
/*!
@property sourcePixelBufferAttributes
@abstract
The pixel buffer attributes of pixel buffers that will be vended by the receiver's CVPixelBufferPool.
@discussion
The value of this property is a dictionary containing pixel buffer attributes keys defined in <CoreVideo/CVPixelBuffer.h>.
*/
@property (nonatomic, readonly, nullable) NSDictionary<NSString *, id> *sourcePixelBufferAttributes;
/*!
@property pixelBufferPool
@abstract
A pixel buffer pool that will vend and efficiently recycle CVPixelBuffer objects that can be appended to the receiver.
@discussion
For maximum efficiency, clients should create CVPixelBuffer objects for appendPixelBuffer:withPresentationTime: by using this pool with the CVPixelBufferPoolCreatePixelBuffer() function.
The value of this property will be NULL before -[AVAssetWriter startWriting] is called on the associated AVAssetWriter object.
This property is key-value observable.
*/
@property (nonatomic, readonly, nullable) CVPixelBufferPoolRef pixelBufferPool;
/*!
@method appendPixelBuffer:withPresentationTime:
@abstract
Appends a pixel buffer to the receiver.
@param pixelBuffer
The CVPixelBuffer to be appended.
@param presentationTime
The presentation time for the pixel buffer to be appended. This time will be considered relative to the time passed to -[AVAssetWriter startSessionAtSourceTime:] to determine the timing of the frame in the output file.
@result
A BOOL value indicating success of appending the pixel buffer. If a result of NO is returned, clients can check the value of AVAssetWriter.status to determine whether the writing operation completed, failed, or was cancelled. If the status is AVAssetWriterStatusFailed, AVAssetWriter.error will contain an instance of NSError that describes the failure.
@discussion
The receiver will retain the CVPixelBuffer until it is done with it, and then release it. Do not modify a CVPixelBuffer or its contents after you have passed it to this method.
For optimal performance the format of the pixel buffer should match one of the native formats supported by the selected video encoder. Below are some recommendations:
The H.264 encoder natively supports kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange and kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, which should be used with video-range and full-range input, respectively. The JPEG encoder on iOS natively supports kCVPixelFormatType_422YpCbCr8FullRange. For other video codecs on OS X, kCVPixelFormatType_422YpCbCr8 is the preferred pixel format for video and is generally the most performant when encoding. If you need to work in the RGB domain, kCVPixelFormatType_32BGRA is recommended on iOS and kCVPixelFormatType_32ARGB is recommended on OS X.
Pixel buffers not in a natively supported format will be converted internally prior to encoding when possible. Pixel format conversions within the same range (video or full) are generally faster than conversions between different ranges.
The ProRes encoders can preserve high-bit-depth sources, supporting up to 12 bits per channel. ProRes 4444 can contain a mathematically lossless alpha channel and performs no chroma subsampling, which makes it ideal for quality-critical applications. If you are working with 8-bit sources, ProRes is also a good format to use due to its high image quality. Use either of the recommended pixel formats above. Note that RGB pixel formats by definition have 4:4:4 chroma sampling.
If you are working with high-bit-depth sources, the following YUV pixel formats are recommended when encoding to ProRes: kCVPixelFormatType_4444AYpCbCr16, kCVPixelFormatType_422YpCbCr16, and kCVPixelFormatType_422YpCbCr10. When working in the RGB domain, kCVPixelFormatType_64ARGB is recommended. Scaling and color matching are not currently supported when using AVAssetWriter with any of these high-bit-depth pixel formats. Make sure that your track's output settings dictionary specifies the same width and height as the buffers you will be appending, and do not include AVVideoScalingModeKey or AVVideoColorPropertiesKey.
Before calling this method, you must ensure that the input that underlies the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
*/
- (BOOL)appendPixelBuffer:(CVPixelBufferRef)pixelBuffer withPresentationTime:(CMTime)presentationTime;
@end
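/*
 A sketch of the recommended pool-based append path; `videoInput` is assumed, and the 1920x1080
 BGRA attributes and 30 fps timing are illustrative.
*/
NSDictionary<NSString *, id> *attrs = @{ (__bridge NSString *)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA),
                                         (__bridge NSString *)kCVPixelBufferWidthKey : @1920,
                                         (__bridge NSString *)kCVPixelBufferHeightKey : @1080 };
AVAssetWriterInputPixelBufferAdaptor *adaptor =
    [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:videoInput
                                                                     sourcePixelBufferAttributes:attrs];
// After -[AVAssetWriter startWriting], adaptor.pixelBufferPool becomes non-NULL.
int64_t frameIndex = 0;
CVPixelBufferRef pixelBuffer = NULL;
if (CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, adaptor.pixelBufferPool, &pixelBuffer) == kCVReturnSuccess) {
    // ... render the frame into pixelBuffer here ...
    [adaptor appendPixelBuffer:pixelBuffer withPresentationTime:CMTimeMake(frameIndex, 30)];
    CVPixelBufferRelease(pixelBuffer);
}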
@class AVTimedMetadataGroup;
@class AVAssetWriterInputMetadataAdaptorInternal;
/*!
@class AVAssetWriterInputMetadataAdaptor
@abstract
Defines an interface for writing metadata, packaged as instances of AVTimedMetadataGroup, to a single AVAssetWriterInput object.
*/
NS_CLASS_AVAILABLE(10_10, 8_0)
@interface AVAssetWriterInputMetadataAdaptor : NSObject {
AVAssetWriterInputMetadataAdaptorInternal *_internal;
}
AV_INIT_UNAVAILABLE
/*!
@method assetWriterInputMetadataAdaptorWithAssetWriterInput:
@abstract
Creates a new timed metadata group adaptor to receive instances of AVTimedMetadataGroup for writing to the output file.
@param input
An instance of AVAssetWriterInput to which the receiver should append groups of timed metadata. Only asset writer inputs that accept media data of type AVMediaTypeMetadata can be used to initialize a timed metadata group adaptor.
@result
An instance of AVAssetWriterInputMetadataAdaptor.
@discussion
The instance of AVAssetWriterInput passed in to this method must have been created with a format hint indicating all possible combinations of identifier (or, alternatively, key and keySpace), dataType, and extendedLanguageTag that will be appended to the metadata adaptor. It is an error to append metadata items not represented in the input's format hint.
It is an error to initialize an instance of AVAssetWriterInputMetadataAdaptor with an asset writer input that is already attached to another instance of AVAssetWriterInputMetadataAdaptor. It is also an error to initialize an instance of AVAssetWriterInputMetadataAdaptor with an asset writer input whose asset writer has progressed beyond AVAssetWriterStatusUnknown.
*/
+ (instancetype)assetWriterInputMetadataAdaptorWithAssetWriterInput:(AVAssetWriterInput *)input;
/*!
@method initWithAssetWriterInput:
@abstract
Creates a new timed metadata group adaptor to receive instances of AVTimedMetadataGroup for writing to the output file.
@param input
An instance of AVAssetWriterInput to which the receiver should append groups of timed metadata. Only asset writer inputs that accept media data of type AVMediaTypeMetadata can be used to initialize a timed metadata group adaptor.
@result
An instance of AVAssetWriterInputMetadataAdaptor.
@discussion
The instance of AVAssetWriterInput passed in to this method must have been created with a format hint indicating all possible combinations of identifier (or, alternatively, key and keySpace), dataType, and extendedLanguageTag that will be appended to the metadata adaptor. It is an error to append metadata items not represented in the input's format hint. For help creating a suitable format hint, see -[AVTimedMetadataGroup copyFormatDescription].
It is an error to initialize an instance of AVAssetWriterInputMetadataAdaptor with an asset writer input that is already attached to another instance of AVAssetWriterInputMetadataAdaptor. It is also an error to initialize an instance of AVAssetWriterInputMetadataAdaptor with an asset writer input whose asset writer has progressed beyond AVAssetWriterStatusUnknown.
*/
- (instancetype)initWithAssetWriterInput:(AVAssetWriterInput *)input NS_DESIGNATED_INITIALIZER;
/*!
@property assetWriterInput
@abstract
The asset writer input to which the receiver should append timed metadata groups.
*/
@property (nonatomic, readonly) AVAssetWriterInput *assetWriterInput;
/*!
@method appendTimedMetadataGroup:
@abstract
Appends a timed metadata group to the receiver.
@param timedMetadataGroup
The AVTimedMetadataGroup to be appended.
@result
A BOOL value indicating success of appending the timed metadata group. If a result of NO is returned, AVAssetWriter.error will contain more information about why appending the timed metadata group failed.
@discussion
The receiver will retain the AVTimedMetadataGroup until it is done with it, and then release it.
The timing of the metadata items in the output asset will correspond to the timeRange of the AVTimedMetadataGroup, regardless of the values of the time and duration properties of the individual items.
Before calling this method, you must ensure that the input that underlies the receiver is attached to an AVAssetWriter via a prior call to -addInput: and that -startWriting has been called on the asset writer. It is an error to invoke this method before starting a session (via -[AVAssetWriter startSessionAtSourceTime:]) or after ending a session (via -[AVAssetWriter endSessionAtSourceTime:]).
*/
- (BOOL)appendTimedMetadataGroup:(AVTimedMetadataGroup *)timedMetadataGroup;
@end
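/*
 A sketch of creating a metadata input from a format hint and appending one timed group.
 The QuickTime title identifier, the ten-second timeRange, and the variable names are
 illustrative choices.
*/
AVMutableMetadataItem *item = [AVMutableMetadataItem metadataItem];
item.identifier = AVMetadataIdentifierQuickTimeMetadataTitle;
item.dataType = (__bridge NSString *)kCMMetadataBaseDataType_UTF8;
item.value = @"Chapter 1";
AVTimedMetadataGroup *group =
    [[AVTimedMetadataGroup alloc] initWithItems:@[ item ]
                                      timeRange:CMTimeRangeMake(kCMTimeZero, CMTimeMake(10, 1))];
CMFormatDescriptionRef formatHint = [group copyFormatDescription]; // covers the identifier/dataType above
AVAssetWriterInput *metadataInput =
    [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeMetadata
                                       outputSettings:nil
                                     sourceFormatHint:formatHint];
if (formatHint) CFRelease(formatHint);
AVAssetWriterInputMetadataAdaptor *metadataAdaptor =
    [AVAssetWriterInputMetadataAdaptor assetWriterInputMetadataAdaptorWithAssetWriterInput:metadataInput];
// ... add metadataInput to the writer, start writing and a session, then:
[metadataAdaptor appendTimedMetadataGroup:group];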
NS_ASSUME_NONNULL_END

View file

@ -1,70 +0,0 @@
/*
File: AVAsynchronousKeyValueLoading.h
Framework: AVFoundation
Copyright 2010-2016 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
typedef NS_ENUM(NSInteger, AVKeyValueStatus) {
AVKeyValueStatusUnknown,
AVKeyValueStatusLoading,
AVKeyValueStatusLoaded,
AVKeyValueStatusFailed,
AVKeyValueStatusCancelled
};
/*!
@protocol AVAsynchronousKeyValueLoading
@abstract The AVAsynchronousKeyValueLoading protocol defines methods that let clients use an AVAsset or AVAssetTrack object without blocking a thread. Using methods in the protocol, one can find out the current status of a key (for example, whether the corresponding value has been loaded); and ask the object to load values asynchronously, informing the client when the operation has completed.
@discussion
Because of the nature of timed audiovisual media, successful initialization of an asset does not necessarily mean that all its data is immediately available. Instead, an asset will wait to load data until an operation is performed on it (for example, directly invoking any relevant AVAsset methods, playback via an AVPlayerItem object, export using AVAssetExportSession, reading using an instance of AVAssetReader, and so on). This means that although you can request the value of any key at any time, and its value will be returned synchronously, the calling thread may be blocked until the request can be satisfied. To avoid blocking, you can:
1. First, determine whether the value for a given key is available using statusOfValueForKey:error:.
2. If a value has not been loaded yet, you can ask the object to load one or more values and be notified when they become available using loadValuesAsynchronouslyForKeys:completionHandler:.
Even for use cases that may typically support ready access to some keys (such as for assets initialized with URLs for files in the local filesystem), slow I/O may require AVAsset to block before returning their values. Although blocking may be acceptable for OS X API clients in cases where assets are being prepared on background threads or in operation queues, in all cases in which blocking should be avoided you should use loadValuesAsynchronouslyForKeys:completionHandler:. For iOS clients, blocking to obtain the value of a key synchronously is never recommended under any circumstances.
*/
@protocol AVAsynchronousKeyValueLoading
@required
/*!
@method statusOfValueForKey:error:
@abstract Reports whether the value for a key is immediately available without blocking.
@param key
An instance of NSString containing the specified key.
@param outError
If the status of the value for the key is AVKeyValueStatusFailed, *outError is set to a non-nil NSError that describes the failure that occurred.
@result The value's current loading status.
@discussion
Clients can use -statusOfValueForKey: to determine the availability of the value of any key of interest. However, this method alone does not prompt the receiver to load the value of a key that's not yet available. To request values for keys that may not already be loaded, without blocking, use -loadValuesAsynchronouslyForKeys:completionHandler:, await invocation of the completion handler, and test the availability of each key via -statusOfValueForKey: before invoking its getter.
Even if access to values of some keys may be readily available, as can occur with receivers initialized with URLs for resources on local volumes, extensive I/O or parsing may be needed for these same receivers to provide values for other keys. A duration for a local MP3 file, for example, may be expensive to obtain, even if the values for other AVAsset properties may be trivial to obtain.
Blocking that may occur when calling the getter for any key should therefore be avoided in the general case by loading values for all keys of interest via -loadValuesAsynchronouslyForKeys:completionHandler: and testing the availability of the requested values before fetching them by calling getters.
The sole exception to this general rule is in usage on Mac OS X on the desktop, where it may be acceptable to block in cases in which the client is preparing objects for use on background threads or in operation queues. On iOS, values should always be loaded asynchronously prior to calling getters for the values, in any usage scenario.
*/
- (AVKeyValueStatus)statusOfValueForKey:(NSString *)key error:(NSError * _Nullable * _Nullable)outError;
/*!
@method loadValuesAsynchronouslyForKeys:completionHandler:
@abstract Directs the target to load the values of any of the specified keys that are not already loaded.
@param keys
An instance of NSArray, containing NSStrings for the specified keys.
@param completionHandler
The block to be invoked when loading succeeds, fails, or is cancelled.
*/
- (void)loadValuesAsynchronouslyForKeys:(NSArray<NSString *> *)keys completionHandler:(nullable void (^)(void))handler;
@end
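/*
 A sketch of the non-blocking pattern described above; `movieURL` is an assumed local file URL.
*/
AVURLAsset *asset = [AVURLAsset URLAssetWithURL:movieURL options:nil];
[asset loadValuesAsynchronouslyForKeys:@[ @"duration" ] completionHandler:^{
    NSError *error = nil;
    AVKeyValueStatus status = [asset statusOfValueForKey:@"duration" error:&error];
    if (status == AVKeyValueStatusLoaded) {
        NSLog(@"duration: %.2f seconds", CMTimeGetSeconds(asset.duration)); // getter no longer blocks
    } else {
        NSLog(@"duration unavailable (status %ld): %@", (long)status, error);
    }
}];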
NS_ASSUME_NONNULL_END

View file

@ -1,9 +0,0 @@
/*
File: AVAudioBuffer.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioBuffer.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioChannelLayout.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioChannelLayout.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioConnectionPoint.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioConnectionPoint.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioConverter.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioConverter.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioEngine.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioEngine.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioEnvironmentNode.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioEnvironmentNode.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioFile.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioFile.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioFormat.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioFormat.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioIONode.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioIONode.h>

View file

@ -1,196 +0,0 @@
/*
File: AVAudioMix.h
Framework: AVFoundation
Copyright 2010-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <AVFoundation/AVAudioProcessingSettings.h>
#import <Foundation/Foundation.h>
#import <CoreMedia/CMBase.h>
#import <CoreMedia/CMTime.h>
#import <CoreMedia/CMTimeRange.h>
#import <MediaToolbox/MTAudioProcessingTap.h>
/*!
@class AVAudioMix
@abstract Allows custom audio processing to be performed on audio tracks during playback or other operations.
*/
@class AVAudioMixInternal;
@class AVAudioMixInputParameters;
NS_ASSUME_NONNULL_BEGIN
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAudioMix : NSObject <NSCopying, NSMutableCopying> {
@private
AVAudioMixInternal *_audioMix;
}
/* Indicates parameters for inputs to the mix; an NSArray of instances of AVAudioMixInputParameters. Note that an instance of AVAudioMixInputParameters is not required for each audio track that contributes to the mix; audio for those without associated AVAudioMixInputParameters will be included in the mix, processed according to default behavior. */
@property (nonatomic, readonly, copy) NSArray<AVAudioMixInputParameters *> *inputParameters;
@end
@class AVMutableAudioMixInternal;
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVMutableAudioMix : AVAudioMix {
@private
AVMutableAudioMixInternal *_mutableAudioMix __attribute__((unused));
}
/*
@method audioMix
@abstract Returns a new instance of AVMutableAudioMix with a nil array of inputParameters.
*/
+ (instancetype)audioMix;
/*!
@property inputParameters
@abstract Indicates parameters for inputs to the mix; an NSArray of instances of AVAudioMixInputParameters.
@discussion Note that an instance of AVAudioMixInputParameters is not required for each audio track that contributes to the mix; audio for those without associated AVAudioMixInputParameters will be included in the mix, processed according to default behavior.
*/
@property (nonatomic, copy) NSArray<AVAudioMixInputParameters *> *inputParameters;
@end
/*!
@class AVAudioMixInputParameters
@abstract Provides time-varying parameters to apply to an input of an audio mix. Audio volume is currently supported as a time-varying parameter.
@discussion
Use an instance of AVAudioMixInputParameters to apply audio volume ramps for an input to an audio mix.
AVAudioMixInputParameters are associated with audio tracks via the trackID property.
Notes on audio volume ramps:
Before the first time at which a volume is set, a volume of 1.0 is used; after the last time for which a volume has been set, the last volume is used.
Within the timeRange of a volume ramp, the volume is interpolated between the startVolume and endVolume of the ramp.
For example, setting the volume to 1.0 at time 0 and also setting a volume ramp from a volume of 0.5 to 0.2 with a timeRange of [4.0, 5.0]
results in audio volume parameters that hold the volume constant at 1.0 from 0.0 sec to 4.0 sec, then cause it to jump to 0.5 and
descend to 0.2 from 4.0 sec to 9.0 sec, holding constant at 0.2 thereafter.
*/
@class AVAudioMixInputParametersInternal;
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVAudioMixInputParameters : NSObject <NSCopying, NSMutableCopying> {
@private
AVAudioMixInputParametersInternal *_inputParameters;
}
/*!
@property trackID
@abstract Indicates the trackID of the audio track to which the parameters should be applied.
*/
@property (nonatomic, readonly) CMPersistentTrackID trackID;
/*!
@property audioTimePitchAlgorithm
@abstract Indicates the processing algorithm used to manage audio pitch at varying rates and for scaled audio edits.
@discussion
Constants for various time pitch algorithms, e.g. AVAudioTimePitchSpectral, are defined in AVAudioProcessingSettings.h.
Can be nil, in which case the audioTimePitchAlgorithm set on the AVPlayerItem, AVAssetExportSession, or AVAssetReaderAudioMixOutput on which the AVAudioMix is set will be used for the associated track.
*/
@property (nonatomic, readonly, copy, nullable) AVAudioTimePitchAlgorithm audioTimePitchAlgorithm NS_AVAILABLE(10_10, 7_0);
/*!
@property audioTapProcessor
@abstract Indicates the audio processing tap that will be used for the audio track.
*/
@property (nonatomic, readonly, retain, nullable) __attribute__((NSObject)) MTAudioProcessingTapRef audioTapProcessor NS_AVAILABLE(10_9, 6_0);
/*
@method getVolumeRampForTime:startVolume:endVolume:timeRange:
@abstract Obtains the volume ramp that includes the specified time.
@param time
If a ramp with a timeRange that contains the specified time has been set, information about the effective ramp for that time is supplied.
Otherwise, information about the first ramp that starts after the specified time is supplied.
@param startVolume
A pointer to a float to receive the starting volume value for the volume ramp. May be NULL.
@param endVolume
A pointer to a float to receive the ending volume value for the volume ramp. May be NULL.
@param timeRange
A pointer to a CMTimeRange to receive the timeRange of the volume ramp. May be NULL.
@result
An indication of success. NO will be returned if the specified time is beyond the duration of the last volume ramp that has been set.
*/
- (BOOL)getVolumeRampForTime:(CMTime)time startVolume:(nullable float *)startVolume endVolume:(nullable float *)endVolume timeRange:(nullable CMTimeRange *)timeRange;
@end
@class AVAssetTrack;
@class AVPlayerItemTrack;
@class AVMutableAudioMixInputParametersInternal;
NS_CLASS_AVAILABLE(10_7, 4_0)
@interface AVMutableAudioMixInputParameters : AVAudioMixInputParameters {
@private
AVMutableAudioMixInputParametersInternal *_mutableInputParameters __attribute__((unused));
}
/*
@method audioMixInputParametersWithTrack:
@abstract Returns a new instance of AVMutableAudioMixInputParameters with no volume ramps and a trackID set to the specified track's trackID.
@param track
A reference to an AVAssetTrack.
*/
+ (instancetype)audioMixInputParametersWithTrack:(nullable AVAssetTrack *)track;
/*
@method audioMixInputParameters
@abstract Returns a new instance of AVMutableAudioMixInputParameters with no volume ramps and a trackID initialized to kCMPersistentTrackID_Invalid.
*/
+ (instancetype)audioMixInputParameters;
/*!
@property trackID
@abstract Indicates the trackID of the audio track to which the parameters should be applied.
*/
@property (nonatomic) CMPersistentTrackID trackID;
/*!
@property audioTimePitchAlgorithm
@abstract Indicates the processing algorithm used to manage audio pitch at varying rates and for scaled audio edits.
@discussion
Constants for various time pitch algorithms, e.g. AVAudioTimePitchSpectral, are defined in AVAudioProcessingSettings.h.
Can be nil, in which case the audioTimePitchAlgorithm set on the AVPlayerItem, AVAssetExportSession, or AVAssetReaderAudioMixOutput on which the AVAudioMix is set will be used for the associated track.
*/
@property (nonatomic, copy, nullable) AVAudioTimePitchAlgorithm audioTimePitchAlgorithm NS_AVAILABLE(10_10, 7_0);
/*!
@property audioTapProcessor
@abstract Indicates the audio processing tap that will be used for the audio track.
*/
@property (nonatomic, retain, nullable) __attribute__((NSObject)) MTAudioProcessingTapRef audioTapProcessor NS_AVAILABLE(10_9, 6_0);
/*
@method setVolumeRampFromStartVolume:toEndVolume:timeRange:
@abstract Sets a volume ramp to apply during the specified timeRange.
*/
- (void)setVolumeRampFromStartVolume:(float)startVolume toEndVolume:(float)endVolume timeRange:(CMTimeRange)timeRange;
/*
@method setVolume:atTime:
@abstract Sets the value of the audio volume at a specific time.
*/
- (void)setVolume:(float)volume atTime:(CMTime)time;
@end
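/*
 A sketch of a five-second fade-out using a volume ramp; `audioTrack` is assumed to come from
 the asset being played, and the mix is attached to an assumed `playerItem` (it could equally
 be set on an AVAssetExportSession or AVAssetReaderAudioMixOutput).
*/
AVMutableAudioMixInputParameters *params =
    [AVMutableAudioMixInputParameters audioMixInputParametersWithTrack:audioTrack];
[params setVolumeRampFromStartVolume:1.0
                         toEndVolume:0.0
                           timeRange:CMTimeRangeMake(kCMTimeZero, CMTimeMakeWithSeconds(5.0, 600))];
AVMutableAudioMix *mix = [AVMutableAudioMix audioMix];
mix.inputParameters = @[ params ];
playerItem.audioMix = mix;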
NS_ASSUME_NONNULL_END

View file

@ -1,9 +0,0 @@
/*
File: AVAudioMixerNode.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioMixerNode.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioMixing.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioMixing.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioNode.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioNode.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioPlayer.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioPlayer.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioPlayerNode.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioPlayerNode.h>

View file

@ -1,49 +0,0 @@
/*
File: AVAudioProcessingSettings.h
Framework: AVFoundation
Copyright 2013-2017 Apple Inc. All rights reserved.
*/
#import <AVFoundation/AVBase.h>
#import <Foundation/Foundation.h>
/*!
@typedef AVAudioTimePitchAlgorithm
@abstract
The type of a time pitch algorithm.
@discussion
On OS X, the default algorithm for all time pitch operations is AVAudioTimePitchAlgorithmSpectral. On iOS, the default algorithm for playback is AVAudioTimePitchAlgorithmLowQualityZeroLatency and the default for export & other offline processing is AVAudioTimePitchAlgorithmSpectral.
For scaled audio edits, i.e. when the timeMapping of an AVAssetTrackSegment is between timeRanges of unequal duration, it is important to choose an algorithm that supports the full range of edit rates present in the source media. AVAudioTimePitchAlgorithmSpectral is often the best choice due to the highly inclusive range of rates it supports, assuming that it is desirable to maintain a constant pitch regardless of the edit rate. If it is instead desirable to allow the pitch to vary with the edit rate, AVAudioTimePitchAlgorithmVarispeed is the best choice.
*/
typedef NSString * AVAudioTimePitchAlgorithm NS_STRING_ENUM;
/*!
@abstract Values for time pitch algorithm
@constant AVAudioTimePitchAlgorithmLowQualityZeroLatency
Low quality, very inexpensive. Suitable for brief fast-forward/rewind effects, low quality voice.
Rate snapped to {0.5, 0.666667, 0.8, 1.0, 1.25, 1.5, 2.0}.
@constant AVAudioTimePitchAlgorithmTimeDomain
Modest quality, less expensive. Suitable for voice.
Variable rate from 1/32 to 32.
@constant AVAudioTimePitchAlgorithmSpectral
Highest quality, most computationally expensive. Suitable for music.
Variable rate from 1/32 to 32.
@constant AVAudioTimePitchAlgorithmVarispeed
High quality, no pitch correction. Pitch varies with rate.
Variable rate from 1/32 to 32.
*/
AVF_EXPORT AVAudioTimePitchAlgorithm const AVAudioTimePitchAlgorithmLowQualityZeroLatency NS_AVAILABLE_IOS(7_0);
AVF_EXPORT AVAudioTimePitchAlgorithm const AVAudioTimePitchAlgorithmTimeDomain NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT AVAudioTimePitchAlgorithm const AVAudioTimePitchAlgorithmSpectral NS_AVAILABLE(10_9, 7_0);
AVF_EXPORT AVAudioTimePitchAlgorithm const AVAudioTimePitchAlgorithmVarispeed NS_AVAILABLE(10_9, 7_0);
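/*
 A sketch of opting into pitch-corrected fast playback; `playerItem` and `player` are assumed.
*/
playerItem.audioTimePitchAlgorithm = AVAudioTimePitchAlgorithmSpectral; // constant pitch across rates
player.rate = 2.0;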

View file

@ -1,9 +0,0 @@
/*
File: AVAudioRecorder.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioRecorder.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioSequencer.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioSequencer.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioSession.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioSession.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioSettings.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioSettings.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioTime.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioTime.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioTypes.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioTypes.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnit.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnit.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitComponent.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitComponent.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitDelay.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitDelay.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitDistortion.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitDistortion.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitEQ.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitEQ.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitEffect.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitEffect.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitGenerator.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitGenerator.h>

View file

@ -1,9 +0,0 @@
/*
File: AVAudioUnitMIDIInstrument.h
Framework: AVFoundation
Copyright 2016 Apple Inc. All rights reserved.
*/
#import <AVFAudio/AVAudioUnitMIDIInstrument.h>

Some files were not shown because too many files have changed in this diff.