By the end of this lesson, you will be able to:
ℹ️ Info Definition: Audio processing in mobile apps involves recording, playing, editing, and analyzing sound. Modern apps like Spotify, SoundCloud, and voice memo apps rely on sophisticated audio processing capabilities.
Audio and music apps are among the most popular and engaging mobile applications:
| App Type | Examples | Key Features |
|---|---|---|
| Music Players | Spotify, Apple Music | Streaming, playlists, recommendations |
| Recording Apps | Voice Memos, Otter.ai | Recording, transcription, editing |
| Audio Creation | GarageBand, FL Studio Mobile | Multi-track editing, effects, mixing |
| Social Audio | Clubhouse, Discord | Live audio, rooms, voice chat |
| Learning Apps | Language apps, podcasts | Audio lessons, pronunciation |
💡 Market Insight: The global music streaming market is worth $25+ billion and growing rapidly, with mobile apps driving most of the growth!
# Core audio libraries
npx expo install expo-av
npx expo install expo-media-library
npx expo install expo-audio
# For advanced audio processing
npm install react-native-sound
npm install react-native-audio-recorder-player
# For audio visualization
npm install react-native-svg
npm install d3-shape
// utils/audioPermissions.ts
import { Audio } from 'expo-av';
import * as MediaLibrary from 'expo-media-library';
/**
 * Requests microphone-recording and media-library permissions and, when both
 * are granted, configures the shared audio session (recording enabled on iOS,
 * playback allowed in silent mode, audio ducking on Android).
 *
 * @returns true on success; false when a permission was denied or setup threw.
 */
export const requestAudioPermissions = async (): Promise<boolean> => {
  try {
    // Ask for each permission in turn so the system dialogs appear one at a time.
    const recordingPermission = await Audio.requestPermissionsAsync();
    const mediaPermission = await MediaLibrary.requestPermissionsAsync();

    const bothGranted =
      recordingPermission.status === 'granted' &&
      mediaPermission.status === 'granted';
    if (!bothGranted) {
      throw new Error('Audio permissions not granted');
    }

    // Configure the app-wide audio session used for recording and playback.
    await Audio.setAudioModeAsync({
      allowsRecordingIOS: true,
      playsInSilentModeIOS: true,
      shouldDuckAndroid: true,
      playThroughEarpieceAndroid: false,
    });

    return true;
  } catch (error) {
    console.error('Error requesting audio permissions:', error);
    return false;
  }
};
// components/VoiceRecorder.tsx
import React, { useState, useEffect } from 'react';
import {
View,
Text,
TouchableOpacity,
StyleSheet,
Alert,
Animated,
} from 'react-native';
import { Audio, AVPlaybackStatus } from 'expo-av';
import { Ionicons } from '@expo/vector-icons';
// Metadata for one finished voice recording held in component state.
interface Recording {
// Unique id (epoch-milliseconds string taken at creation time).
id: string;
// Local file URI returned by Audio.Recording.getURI().
uri: string;
// Length in whole seconds, copied from the on-screen recording timer.
duration: number;
// Timestamp of when the recording was stopped.
created: Date;
}
/**
 * Voice-memo recorder built on expo-av.
 *
 * Records audio with a pulsing record button and a once-per-second elapsed
 * timer, keeps finished recordings in component state (newest first), and
 * supports playback and deletion of individual recordings.
 */
export const VoiceRecorder: React.FC = () => {
  const [recording, setRecording] = useState<Audio.Recording | null>(null);
  const [recordings, setRecordings] = useState<Recording[]>([]);
  const [isRecording, setIsRecording] = useState(false);
  const [isPlaying, setIsPlaying] = useState(false);
  const [sound, setSound] = useState<Audio.Sound | null>(null);
  const [currentPlayingId, setCurrentPlayingId] = useState<string | null>(null);
  const [recordingTime, setRecordingTime] = useState(0);
  const [pulseAnim] = useState(new Animated.Value(1));
  // Handle for the 1s elapsed-time interval. Kept in a ref rather than as an
  // ad-hoc `_timer` property on the Audio.Recording object (which has no such
  // field and fails type checking) so it survives re-renders and can always
  // be cleared.
  const timerRef = React.useRef<ReturnType<typeof setInterval> | null>(null);

  // Unload the loaded sound when it is replaced or the component unmounts.
  useEffect(() => {
    return () => {
      if (sound) {
        sound.unloadAsync();
      }
    };
  }, [sound]);

  // Make sure the recording timer never outlives the component.
  useEffect(() => {
    return () => {
      if (timerRef.current) {
        clearInterval(timerRef.current);
      }
    };
  }, []);

  /** Requests mic permission, configures the audio session, starts recording. */
  const startRecording = async () => {
    try {
      const permission = await Audio.requestPermissionsAsync();
      if (permission.status !== 'granted') {
        Alert.alert('Permission required', 'Please grant audio permissions');
        return;
      }
      await Audio.setAudioModeAsync({
        allowsRecordingIOS: true,
        playsInSilentModeIOS: true,
      });
      const { recording } = await Audio.Recording.createAsync(
        Audio.RecordingOptionsPresets.HIGH_QUALITY
      );
      setRecording(recording);
      setIsRecording(true);
      setRecordingTime(0);
      // Pulse the record button for as long as recording is active.
      Animated.loop(
        Animated.sequence([
          Animated.timing(pulseAnim, {
            toValue: 1.2,
            duration: 500,
            useNativeDriver: true,
          }),
          Animated.timing(pulseAnim, {
            toValue: 1,
            duration: 500,
            useNativeDriver: true,
          }),
        ])
      ).start();
      // Tick the elapsed-time display once per second.
      timerRef.current = setInterval(() => {
        setRecordingTime((prev) => prev + 1);
      }, 1000);
    } catch (err) {
      console.error('Failed to start recording', err);
      Alert.alert('Error', 'Failed to start recording');
    }
  };

  /** Stops the active recording and prepends it to the recordings list. */
  const stopRecording = async () => {
    if (!recording) return;
    setIsRecording(false);
    pulseAnim.stopAnimation();
    pulseAnim.setValue(1);
    if (timerRef.current) {
      clearInterval(timerRef.current);
      timerRef.current = null;
    }
    await recording.stopAndUnloadAsync();
    const uri = recording.getURI();
    if (uri) {
      const newRecording: Recording = {
        id: Date.now().toString(),
        uri,
        duration: recordingTime,
        created: new Date(),
      };
      // Newest recording first.
      setRecordings((prev) => [newRecording, ...prev]);
    }
    setRecording(null);
    setRecordingTime(0);
  };

  /**
   * Plays a recording, or stops it if it is the one currently playing.
   * Any other loaded sound is unloaded first so only one plays at a time.
   */
  const playRecording = async (recordingItem: Recording) => {
    try {
      if (sound && currentPlayingId === recordingItem.id) {
        // Tapping the item that is playing stops and unloads it.
        await sound.stopAsync();
        await sound.unloadAsync();
        setSound(null);
        setIsPlaying(false);
        setCurrentPlayingId(null);
        return;
      }
      // Stop any existing sound before loading a new one.
      if (sound) {
        await sound.unloadAsync();
      }
      const { sound: newSound } = await Audio.Sound.createAsync(
        { uri: recordingItem.uri },
        { shouldPlay: true }
      );
      setSound(newSound);
      setIsPlaying(true);
      setCurrentPlayingId(recordingItem.id);
      // Reset the playing indicators when playback reaches the end.
      newSound.setOnPlaybackStatusUpdate((status: AVPlaybackStatus) => {
        if (status.isLoaded && status.didJustFinish) {
          setIsPlaying(false);
          setCurrentPlayingId(null);
        }
      });
    } catch (error) {
      console.error('Error playing recording:', error);
      Alert.alert('Error', 'Could not play recording');
    }
  };

  /**
   * Confirms with the user, then removes the recording from state.
   * NOTE(review): only the state entry is removed — the audio file at
   * `uri` is not deleted from disk; confirm whether cleanup is intended.
   */
  const deleteRecording = (id: string) => {
    Alert.alert(
      'Delete Recording',
      'Are you sure you want to delete this recording?',
      [
        { text: 'Cancel', style: 'cancel' },
        {
          text: 'Delete',
          style: 'destructive',
          onPress: () => {
            setRecordings(prev => prev.filter(r => r.id !== id));
          },
        },
      ]
    );
  };

  /** Formats whole seconds as m:ss. */
  const formatDuration = (seconds: number) => {
    const mins = Math.floor(seconds / 60);
    const secs = seconds % 60;
    return `${mins}:${secs.toString().padStart(2, '0')}`;
  };

  return (
    <View style={styles.container}>
      <Text style={styles.title}>🎤 Voice Recorder</Text>
      {/* Recording Controls */}
      <View style={styles.controlsContainer}>
        <Animated.View style={[styles.recordButton, { transform: [{ scale: pulseAnim }] }]}>
          <TouchableOpacity
            style={[
              styles.recordButtonInner,
              isRecording && styles.recordingActive
            ]}
            onPress={isRecording ? stopRecording : startRecording}
          >
            <Ionicons
              name={isRecording ? 'stop' : 'mic'}
              size={32}
              color="white"
            />
          </TouchableOpacity>
        </Animated.View>
        {isRecording && (
          <Text style={styles.recordingTime}>
            Recording: {formatDuration(recordingTime)}
          </Text>
        )}
      </View>
      {/* Recordings List */}
      <View style={styles.recordingsList}>
        <Text style={styles.listTitle}>Recordings ({recordings.length})</Text>
        {recordings.map((item) => (
          <View key={item.id} style={styles.recordingItem}>
            <View style={styles.recordingInfo}>
              <Text style={styles.recordingDate}>
                {item.created.toLocaleDateString()} {item.created.toLocaleTimeString()}
              </Text>
              <Text style={styles.recordingDuration}>
                Duration: {formatDuration(item.duration)}
              </Text>
            </View>
            <View style={styles.recordingActions}>
              <TouchableOpacity
                style={styles.actionButton}
                onPress={() => playRecording(item)}
              >
                <Ionicons
                  name={currentPlayingId === item.id && isPlaying ? 'pause' : 'play'}
                  size={20}
                  color="#007AFF"
                />
              </TouchableOpacity>
              <TouchableOpacity
                style={styles.actionButton}
                onPress={() => deleteRecording(item.id)}
              >
                <Ionicons name="trash" size={20} color="#FF3B30" />
              </TouchableOpacity>
            </View>
          </View>
        ))}
        {recordings.length === 0 && (
          <Text style={styles.emptyText}>
            No recordings yet. Tap the microphone to start!
          </Text>
        )}
      </View>
    </View>
  );
};
// Styles for VoiceRecorder. Shadow* properties style iOS shadows; the
// matching `elevation` value is the Android equivalent.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    padding: 20,
    backgroundColor: '#F8F9FA',
  },
  title: {
    fontSize: 24,
    fontWeight: 'bold',
    textAlign: 'center',
    marginBottom: 30,
    color: '#2C3E50',
  },
  controlsContainer: {
    alignItems: 'center',
    marginBottom: 40,
  },
  // Outer animated wrapper around the record button (scaled by pulseAnim).
  recordButton: {
    marginBottom: 20,
  },
  // 80x80 circle: borderRadius is half the width/height.
  recordButtonInner: {
    width: 80,
    height: 80,
    borderRadius: 40,
    backgroundColor: '#007AFF',
    justifyContent: 'center',
    alignItems: 'center',
    shadowColor: '#000',
    shadowOffset: { width: 0, height: 4 },
    shadowOpacity: 0.3,
    shadowRadius: 8,
    elevation: 8,
  },
  // Applied on top of recordButtonInner while recording (red button).
  recordingActive: {
    backgroundColor: '#FF3B30',
  },
  recordingTime: {
    fontSize: 18,
    color: '#FF3B30',
    fontWeight: '600',
  },
  recordingsList: {
    flex: 1,
  },
  listTitle: {
    fontSize: 20,
    fontWeight: 'bold',
    marginBottom: 15,
    color: '#2C3E50',
  },
  // Card row for a single recording: info on the left, actions on the right.
  recordingItem: {
    flexDirection: 'row',
    backgroundColor: 'white',
    padding: 15,
    borderRadius: 12,
    marginBottom: 10,
    shadowColor: '#000',
    shadowOffset: { width: 0, height: 1 },
    shadowOpacity: 0.1,
    shadowRadius: 2,
    elevation: 2,
  },
  recordingInfo: {
    flex: 1,
  },
  recordingDate: {
    fontSize: 16,
    fontWeight: '600',
    color: '#2C3E50',
    marginBottom: 4,
  },
  recordingDuration: {
    fontSize: 14,
    color: '#7F8C8D',
  },
  recordingActions: {
    flexDirection: 'row',
    alignItems: 'center',
  },
  actionButton: {
    padding: 10,
    marginLeft: 10,
  },
  // Shown when the recordings list is empty.
  emptyText: {
    textAlign: 'center',
    color: '#7F8C8D',
    fontSize: 16,
    marginTop: 40,
  },
});
export default VoiceRecorder;
// components/MusicPlayer.tsx
import React, { useState, useEffect } from 'react';
import {
View,
Text,
TouchableOpacity,
StyleSheet,
Image,
Slider,
Dimensions,
} from 'react-native';
import { Audio, AVPlaybackStatus } from 'expo-av';
import { Ionicons } from '@expo/vector-icons';
import { LinearGradient } from 'expo-linear-gradient';
// A playable track in the player's playlist.
interface Track {
id: string;
title: string;
artist: string;
// Remote or local audio URI passed to Audio.Sound.createAsync.
uri: string;
// Optional album-art image URL.
artwork?: string;
// Optional length; not set for the sample tracks (duration comes from
// playback status updates instead).
duration?: number;
}
// Demo playlist. Both entries intentionally point at the same placeholder
// bell sound, and artwork uses picsum.photos placeholders — replace with
// real assets in a production app.
const sampleTracks: Track[] = [
{
id: '1',
title: 'Chill Vibes',
artist: 'AI Generated',
uri: 'https://www.soundjay.com/misc/sounds/bell-ringing-05.wav',
artwork: 'https://picsum.photos/300/300?random=1',
},
{
id: '2',
title: 'Digital Dreams',
artist: 'Synthetic Sounds',
uri: 'https://www.soundjay.com/misc/sounds/bell-ringing-05.wav',
artwork: 'https://picsum.photos/300/300?random=2',
},
];
/**
 * Full-screen music player for the hard-coded `sampleTracks` playlist.
 * Handles load/play/pause, next/previous with wrap-around, seeking, and
 * direct track selection from the "Up Next" list.
 *
 * NOTE(review): `Slider` is imported from 'react-native' above; it has been
 * removed from React Native core in favor of '@react-native-community/slider'
 * — confirm against the project's RN version.
 */
export const MusicPlayer: React.FC = () => {
  const [sound, setSound] = useState<Audio.Sound | null>(null);
  const [isPlaying, setIsPlaying] = useState(false);
  const [currentTrack, setCurrentTrack] = useState(0);
  const [position, setPosition] = useState(0);
  const [duration, setDuration] = useState(0);
  const [isLoading, setIsLoading] = useState(false);

  // Unload the sound when it changes or the component unmounts.
  useEffect(() => {
    return () => {
      if (sound) {
        sound.unloadAsync();
      }
    };
  }, [sound]);

  /**
   * Loads the given track, replacing any loaded sound.
   *
   * Returns the new Audio.Sound (or null on failure) and optionally starts
   * it playing itself. Callers must NOT rely on the `sound` state variable
   * right after awaiting this function — React state is still stale at that
   * point, which was the bug in the original next/previous handlers (they
   * called playAsync() on the old, already-unloaded sound).
   */
  const loadTrack = async (
    trackIndex: number,
    autoPlay: boolean = false
  ): Promise<Audio.Sound | null> => {
    try {
      setIsLoading(true);
      if (sound) {
        await sound.unloadAsync();
      }
      const { sound: newSound } = await Audio.Sound.createAsync(
        { uri: sampleTracks[trackIndex].uri },
        { shouldPlay: autoPlay }
      );
      setSound(newSound);
      setCurrentTrack(trackIndex);
      // Keep progress/duration/playing state in sync and auto-advance at end.
      // NOTE(review): `playNext` here captures `currentTrack` from this
      // render; rapid consecutive track changes could see a stale index —
      // verify whether a ref is needed for your use case.
      newSound.setOnPlaybackStatusUpdate((status: AVPlaybackStatus) => {
        if (status.isLoaded) {
          setPosition(status.positionMillis || 0);
          setDuration(status.durationMillis || 0);
          setIsPlaying(status.isPlaying);
          if (status.didJustFinish) {
            playNext();
          }
        }
      });
      return newSound;
    } catch (error) {
      console.error('Error loading track:', error);
      return null;
    } finally {
      // Always clear the loading flag, even on failure.
      setIsLoading(false);
    }
  };

  /** Toggles playback, loading and starting the current track if none is loaded. */
  const playPause = async () => {
    if (!sound) {
      await loadTrack(currentTrack, true);
      return;
    }
    if (isPlaying) {
      await sound.pauseAsync();
    } else {
      await sound.playAsync();
    }
  };

  /** Advances to the next track (wrapping) and starts playing it. */
  const playNext = async () => {
    const nextTrack = (currentTrack + 1) % sampleTracks.length;
    await loadTrack(nextTrack, true);
  };

  /** Goes back to the previous track (wrapping) and starts playing it. */
  const playPrevious = async () => {
    const prevTrack =
      currentTrack === 0 ? sampleTracks.length - 1 : currentTrack - 1;
    await loadTrack(prevTrack, true);
  };

  /** Seeks to a fraction (0-1) of the track's duration. */
  const onSeek = async (value: number) => {
    if (sound) {
      await sound.setPositionAsync(value * duration);
    }
  };

  /** Formats milliseconds as m:ss. */
  const formatTime = (millis: number) => {
    const totalSeconds = Math.floor(millis / 1000);
    const minutes = Math.floor(totalSeconds / 60);
    const seconds = totalSeconds % 60;
    return `${minutes}:${seconds.toString().padStart(2, '0')}`;
  };

  const track = sampleTracks[currentTrack];
  return (
    <LinearGradient
      colors={['#667eea', '#764ba2']}
      style={styles.container}
    >
      {/* Album Artwork */}
      <View style={styles.artworkContainer}>
        <Image
          source={{ uri: track.artwork }}
          style={styles.artwork}
        />
        <View style={styles.artworkOverlay} />
      </View>
      {/* Track Info */}
      <View style={styles.trackInfo}>
        <Text style={styles.trackTitle}>{track.title}</Text>
        <Text style={styles.trackArtist}>{track.artist}</Text>
      </View>
      {/* Progress Bar — seek only when the user releases the thumb, not on
          every drag tick, so we don't spam setPositionAsync. */}
      <View style={styles.progressContainer}>
        <Text style={styles.timeText}>{formatTime(position)}</Text>
        <Slider
          style={styles.progressBar}
          value={duration > 0 ? position / duration : 0}
          onSlidingComplete={onSeek}
          minimumTrackTintColor="#FFFFFF"
          maximumTrackTintColor="rgba(255, 255, 255, 0.3)"
          thumbStyle={styles.sliderThumb}
        />
        <Text style={styles.timeText}>{formatTime(duration)}</Text>
      </View>
      {/* Controls */}
      <View style={styles.controls}>
        <TouchableOpacity style={styles.controlButton} onPress={playPrevious}>
          <Ionicons name="play-skip-back" size={30} color="white" />
        </TouchableOpacity>
        <TouchableOpacity
          style={[styles.controlButton, styles.playButton]}
          onPress={playPause}
          disabled={isLoading}
        >
          <Ionicons
            name={isLoading ? 'hourglass' : isPlaying ? 'pause' : 'play'}
            size={40}
            color="white"
          />
        </TouchableOpacity>
        <TouchableOpacity style={styles.controlButton} onPress={playNext}>
          <Ionicons name="play-skip-forward" size={30} color="white" />
        </TouchableOpacity>
      </View>
      {/* Playlist — tapping a track loads it and starts playback. */}
      <View style={styles.playlist}>
        <Text style={styles.playlistTitle}>Up Next</Text>
        {sampleTracks.map((item, index) => (
          <TouchableOpacity
            key={item.id}
            style={[
              styles.playlistItem,
              index === currentTrack && styles.currentTrackItem
            ]}
            onPress={() => loadTrack(index, true)}
          >
            <View style={styles.playlistItemInfo}>
              <Text
                style={[
                  styles.playlistItemTitle,
                  index === currentTrack && styles.currentTrackText
                ]}
              >
                {item.title}
              </Text>
              <Text
                style={[
                  styles.playlistItemArtist,
                  index === currentTrack && styles.currentTrackText
                ]}
              >
                {item.artist}
              </Text>
            </View>
            {index === currentTrack && isPlaying && (
              <Ionicons name="volume-high" size={20} color="#FFD700" />
            )}
          </TouchableOpacity>
        ))}
      </View>
    </LinearGradient>
  );
};
// Screen width drives the square album-artwork size below (80% of width).
const { width } = Dimensions.get('window');
// Styles for MusicPlayer, rendered over a purple LinearGradient background.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    paddingTop: 60,
    paddingHorizontal: 20,
  },
  artworkContainer: {
    alignItems: 'center',
    marginBottom: 30,
  },
  artwork: {
    width: width * 0.8,
    height: width * 0.8,
    borderRadius: 20,
  },
  // Subtle dark tint layered over the artwork image.
  artworkOverlay: {
    position: 'absolute',
    top: 0,
    left: 0,
    right: 0,
    bottom: 0,
    backgroundColor: 'rgba(0, 0, 0, 0.1)',
    borderRadius: 20,
  },
  trackInfo: {
    alignItems: 'center',
    marginBottom: 30,
  },
  trackTitle: {
    fontSize: 24,
    fontWeight: 'bold',
    color: 'white',
    marginBottom: 8,
    textAlign: 'center',
  },
  trackArtist: {
    fontSize: 18,
    color: 'rgba(255, 255, 255, 0.8)',
    textAlign: 'center',
  },
  // Row: elapsed time | slider | total time.
  progressContainer: {
    flexDirection: 'row',
    alignItems: 'center',
    marginBottom: 30,
  },
  progressBar: {
    flex: 1,
    marginHorizontal: 10,
  },
  // minWidth keeps the layout stable as the digits change.
  timeText: {
    color: 'white',
    fontSize: 14,
    minWidth: 40,
  },
  sliderThumb: {
    backgroundColor: '#FFD700',
  },
  controls: {
    flexDirection: 'row',
    justifyContent: 'center',
    alignItems: 'center',
    marginBottom: 40,
  },
  controlButton: {
    padding: 15,
  },
  // Larger, circular center play/pause button.
  playButton: {
    backgroundColor: 'rgba(255, 255, 255, 0.2)',
    borderRadius: 50,
    marginHorizontal: 20,
    padding: 20,
  },
  playlist: {
    flex: 1,
  },
  playlistTitle: {
    fontSize: 18,
    fontWeight: 'bold',
    color: 'white',
    marginBottom: 15,
  },
  playlistItem: {
    flexDirection: 'row',
    alignItems: 'center',
    paddingVertical: 12,
    paddingHorizontal: 15,
    backgroundColor: 'rgba(255, 255, 255, 0.1)',
    borderRadius: 12,
    marginBottom: 8,
  },
  // Gold highlight applied to the row of the currently-selected track.
  currentTrackItem: {
    backgroundColor: 'rgba(255, 215, 0, 0.2)',
  },
  playlistItemInfo: {
    flex: 1,
  },
  playlistItemTitle: {
    fontSize: 16,
    color: 'white',
    marginBottom: 4,
  },
  playlistItemArtist: {
    fontSize: 14,
    color: 'rgba(255, 255, 255, 0.7)',
  },
  currentTrackText: {
    color: '#FFD700',
  },
});
export default MusicPlayer;
// components/AudioVisualizer.tsx
import React, { useEffect, useState } from 'react';
import { View, StyleSheet, Animated, Dimensions } from 'react-native';
// Props for AudioVisualizer.
interface AudioVisualizerProps {
// Drives the bar animation: bars bounce while true, settle while false.
isPlaying: boolean;
// Reserved for real amplitude data; currently unused — the component
// animates bars with random heights instead.
audioData?: number[];
}
/**
 * Decorative equalizer-style visualizer: 20 vertical bars that bounce to
 * random heights while `isPlaying` is true and settle to a baseline when it
 * turns false. Heights are random, not derived from real audio data.
 */
export const AudioVisualizer: React.FC<AudioVisualizerProps> = ({
  isPlaying,
  audioData = [],
}) => {
  // Lazy initializer so the 20 Animated.Values are created exactly once.
  const [animations] = useState(() =>
    Array.from({ length: 20 }, () => new Animated.Value(0.3))
  );
  // Mirrors the isPlaying prop so the recursive animation callbacks always
  // read the LATEST value. The original read `isPlaying` from a stale
  // closure, so bars could keep re-animating forever after pause.
  const isPlayingRef = React.useRef(isPlaying);
  // Handles for the staggered start timeouts, so unmount can cancel them.
  const timeoutsRef = React.useRef<ReturnType<typeof setTimeout>[]>([]);

  useEffect(() => {
    isPlayingRef.current = isPlaying;
    if (isPlaying) {
      startAnimation();
    } else {
      stopAnimation();
    }
    return () => {
      // Cancel pending staggered starts so they cannot fire after
      // pause/unmount.
      timeoutsRef.current.forEach(clearTimeout);
      timeoutsRef.current = [];
    };
  }, [isPlaying]);

  /** Starts each bar's self-repeating random-height animation, staggered 50ms apart. */
  const startAnimation = () => {
    const animateBar = (index: number) => {
      const randomHeight = Math.random() * 0.8 + 0.2;
      Animated.timing(animations[index], {
        toValue: randomHeight,
        duration: 150 + Math.random() * 300,
        // Height/color are layout-bound properties, so the native driver
        // cannot be used here.
        useNativeDriver: false,
      }).start(() => {
        // Re-check the live flag, not the render-time closure value.
        if (isPlayingRef.current) {
          animateBar(index);
        }
      });
    };
    animations.forEach((_, index) => {
      timeoutsRef.current.push(setTimeout(() => animateBar(index), index * 50));
    });
  };

  /** Eases every bar down to a low resting height. */
  const stopAnimation = () => {
    animations.forEach((anim) => {
      Animated.timing(anim, {
        toValue: 0.1,
        duration: 300,
        useNativeDriver: false,
      }).start();
    });
  };

  return (
    <View style={styles.container}>
      {animations.map((anim, index) => (
        <Animated.View
          key={index}
          style={[
            styles.bar,
            {
              height: anim.interpolate({
                inputRange: [0, 1],
                outputRange: [4, 80],
              }),
              backgroundColor: anim.interpolate({
                inputRange: [0, 0.5, 1],
                outputRange: ['#4ECDC4', '#44A08D', '#096A5B'],
              }),
            },
          ]}
        />
      ))}
    </View>
  );
};
// Styles for AudioVisualizer: a 100pt-tall row of thin bars anchored to the
// bottom edge (alignItems: 'flex-end') so they grow upward.
const styles = StyleSheet.create({
  container: {
    flexDirection: 'row',
    alignItems: 'flex-end',
    justifyContent: 'space-between',
    height: 100,
    paddingHorizontal: 20,
  },
  // One equalizer bar; height/color are animated inline by the component.
  bar: {
    width: 4,
    borderRadius: 2,
    marginHorizontal: 1,
  },
});
export default AudioVisualizer;
// services/musicAI.ts
import OpenAI from 'openai';
// User taste profile fed into the recommendation prompt.
interface MusicPreference {
genres: string[];
moods: string[];
// What the user is doing (e.g. passed through verbatim into the prompt).
activity: string;
timeOfDay: string;
}
// One recommendation parsed from the model's JSON response. Field names
// must match what the prompt asks the model to return.
interface TrackRecommendation {
title: string;
artist: string;
genre: string;
mood: string;
// Model-reported confidence in the 0-1 range (per the prompt's request).
confidence: number;
reason: string;
}
/**
 * Thin wrapper around the OpenAI chat API for music-related AI features:
 * track recommendations, mood analysis of transcripts, and playlist naming.
 * All methods degrade gracefully (fallback value) on API errors.
 */
class MusicAI {
  private openai: OpenAI;

  constructor(apiKey: string) {
    this.openai = new OpenAI({ apiKey });
  }

  /**
   * Asks the model for song recommendations based on the user's preferences
   * and the last 10 items of their listening history.
   *
   * @returns the parsed recommendations, or [] on API/parse failure or when
   *          the model's reply is not a JSON array.
   */
  async getRecommendations(
    preferences: MusicPreference,
    listeningHistory: string[]
  ): Promise<TrackRecommendation[]> {
    const prompt = `
Based on these music preferences:
- Genres: ${preferences.genres.join(', ')}
- Moods: ${preferences.moods.join(', ')}
- Activity: ${preferences.activity}
- Time of day: ${preferences.timeOfDay}
- Recent listening: ${listeningHistory.slice(-10).join(', ')}
Recommend 10 songs that would be perfect for this user.
Return as JSON array with: title, artist, genre, mood, confidence (0-1), reason
`;
    try {
      const response = await this.openai.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: prompt }],
        temperature: 0.7,
      });
      // The model is asked for JSON but not forced into a JSON response
      // format, so validate the parsed shape before trusting it. A thrown
      // SyntaxError from JSON.parse falls through to the catch below.
      const parsed: unknown = JSON.parse(
        response.choices[0].message.content || '[]'
      );
      return Array.isArray(parsed) ? (parsed as TrackRecommendation[]) : [];
    } catch (error) {
      console.error('AI recommendation error:', error);
      return [];
    }
  }

  /**
   * Classifies the primary mood of an audio transcript.
   *
   * @returns a single mood word from the model, or 'neutral' on failure.
   */
  async analyzeMood(audioTranscript: string): Promise<string> {
    const prompt = `
Analyze the mood and emotional content of this audio transcript:
"${audioTranscript}"
Return just the primary mood (happy, sad, energetic, calm, romantic, angry, etc.)
`;
    try {
      const response = await this.openai.chat.completions.create({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: prompt }],
        // Low temperature for a more deterministic classification.
        temperature: 0.3,
      });
      return response.choices[0].message.content?.trim() || 'neutral';
    } catch (error) {
      console.error('Mood analysis error:', error);
      return 'neutral';
    }
  }

  /**
   * Generates a creative name for a playlist containing the given tracks.
   *
   * @param tracks track titles to base the name on
   * @param theme optional theme hint included in the prompt
   * @returns the generated name, or 'My Playlist' on failure.
   */
  async generatePlaylistName(
    tracks: string[],
    theme?: string
  ): Promise<string> {
    const prompt = `
Create a creative playlist name for these tracks:
${tracks.join(', ')}
${theme ? `Theme: ${theme}` : ''}
Return just the playlist name, make it catchy and descriptive.
`;
    try {
      const response = await this.openai.chat.completions.create({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: prompt }],
        // Higher temperature for more creative names.
        temperature: 0.8,
      });
      return response.choices[0].message.content?.trim() || 'My Playlist';
    } catch (error) {
      console.error('Playlist name generation error:', error);
      return 'My Playlist';
    }
  }
}
export default MusicAI;
// hooks/useAudioEffects.ts
import { useState, useCallback } from 'react';
import { Audio } from 'expo-av';
interface AudioEffects {
reverb: number;
echo: number;
bass: number;
treble: number;
volume: number;
}
/**
 * Hook managing a set of audio-effect values and applying them to an
 * expo-av Sound. Only volume is actually applied; the remaining knobs are
 * placeholders (real DSP would need a native audio-processing library).
 */
export const useAudioEffects = () => {
  // Neutral settings: all processing off, full volume.
  const [effects, setEffects] = useState<AudioEffects>({
    reverb: 0,
    echo: 0,
    bass: 0,
    treble: 0,
    volume: 1,
  });

  // Apply the current settings to a loaded sound. Volume is the only effect
  // expo-av supports directly; the full settings object is logged for now.
  const applyEffects = useCallback(
    async (sound: Audio.Sound) => {
      try {
        await sound.setVolumeAsync(effects.volume);
        // In a real app, you'd use audio processing libraries
        // like react-native-audio-toolkit for advanced effects
        console.log('Effects applied:', effects);
      } catch (error) {
        console.error('Error applying effects:', error);
      }
    },
    [effects]
  );

  // Update a single effect value by key, preserving the others.
  const updateEffect = useCallback(
    (effect: keyof AudioEffects, value: number) => {
      setEffects((previous) => ({ ...previous, [effect]: value }));
    },
    []
  );

  // Restore every effect to its neutral default.
  const resetEffects = useCallback(() => {
    setEffects({
      reverb: 0,
      echo: 0,
      bass: 0,
      treble: 0,
      volume: 1,
    });
  }, []);

  return {
    effects,
    updateEffect,
    applyEffects,
    resetEffects,
  };
};
In this lesson, you learned:
Code with AI: Try building these advanced audio features.
Prompts to try:
Audio apps offer incredible opportunities for creativity and user engagement - master these techniques and build the next hit music app!