By the end of this lesson, you will be able to: request camera and media-library permissions, capture photos and video with expo-camera, detect faces in real time and overlay effects, and structure AI-assisted filter and beauty services.
ℹ️ Info Definition: AR filters and camera effects overlay digital content onto the real world through your device's camera. Apps like Instagram, Snapchat, and TikTok use these technologies to create engaging, shareable content experiences.
Camera-based apps dominate mobile usage:
App Category | Examples | Key Features |
---|---|---|
Social Camera | Instagram, Snapchat | Filters, stories, sharing |
Photo Editing | VSCO, Lightroom | Advanced editing, presets |
AR Entertainment | TikTok, Lens Studio | Face tracking, 3D effects |
Utility Camera | CamScanner, Google Lens | Document scanning, object recognition |
Beauty Apps | FaceTune, YouCam | Skin smoothing, makeup simulation |
💡 Market Insight: The global AR market is projected to reach $198 billion by 2025, with mobile AR filters being a major driver!
# Core camera libraries (expo install picks SDK-compatible versions)
npx expo install expo-camera
npx expo install expo-media-library
# NOTE(review): expo-face-detector has been deprecated in recent Expo SDKs —
# confirm it is still available for your SDK version before relying on it.
npx expo install expo-face-detector
# For advanced image processing
npm install react-native-vision-camera
npm install vision-camera-face-detector
# For filters and effects
npm install react-native-image-filter-kit
npm install react-native-svg
npm install gl-react-native
// utils/cameraPermissions.ts
import { Camera } from 'expo-camera';
import * as MediaLibrary from 'expo-media-library';
/**
 * Requests camera and media-library permissions, one after the other.
 * Resolves to the granted statuses, or `{ camera: 'denied', media: 'denied' }`
 * when either request fails or is not granted (the error is logged, not rethrown).
 */
export const requestCameraPermissions = async () => {
  try {
    const cameraResult = await Camera.requestCameraPermissionsAsync();
    const mediaResult = await MediaLibrary.requestPermissionsAsync();
    const bothGranted =
      cameraResult.status === 'granted' && mediaResult.status === 'granted';
    if (!bothGranted) {
      throw new Error('Camera permissions not granted');
    }
    return { camera: cameraResult.status, media: mediaResult.status };
  } catch (error) {
    console.error('Error requesting permissions:', error);
    return { camera: 'denied', media: 'denied' };
  }
};
// components/ARCamera.tsx
import React, { useState, useRef, useEffect } from 'react';
import {
View,
Text,
TouchableOpacity,
StyleSheet,
Alert,
Dimensions,
Animated,
} from 'react-native';
import { Camera, CameraType } from 'expo-camera';
import * as FaceDetector from 'expo-face-detector';
import * as MediaLibrary from 'expo-media-library';
import { Ionicons } from '@expo/vector-icons';
import { LinearGradient } from 'expo-linear-gradient';
// Shape of one face result delivered by expo-face-detector via the
// camera's onFacesDetected callback.
interface DetectedFace {
faceID: number;
bounds: {
origin: { x: number; y: number }; // top-left corner, camera-view coordinates
size: { width: number; height: number };
};
// 0-1 probabilities; present only when runClassifications is enabled
// in faceDetectorSettings.
leftEyeOpenProbability?: number;
rightEyeOpenProbability?: number;
smilingProbability?: number;
}
// One selectable camera filter preset.
interface Filter {
id: string; // unique key; also stored as the selected-filter state value
name: string; // label rendered under the icon in the filter strip
icon: string; // emoji shown in the filter strip
effect: string; // processing key consumed by applyFilter()
}
// Preset filters shown in the selector strip. `effect` is the key the
// applyFilter() placeholder receives; 'none' means pass-through.
const filters: Filter[] = [
{ id: 'none', name: 'Original', icon: '🔄', effect: 'none' },
{ id: 'vintage', name: 'Vintage', icon: '📸', effect: 'sepia' },
{ id: 'bw', name: 'B&W', icon: '⚫', effect: 'grayscale' },
{ id: 'warm', name: 'Warm', icon: '☀️', effect: 'warm' },
{ id: 'cool', name: 'Cool', icon: '❄️', effect: 'cool' },
{ id: 'neon', name: 'Neon', icon: '✨', effect: 'neon' },
];
/**
 * Full-screen AR camera: live face detection with overlays, a filter
 * selector strip, photo capture with a shutter-flash animation, and
 * video recording saved to the media library.
 *
 * Renders a permission prompt until camera + media-library access is
 * granted.
 */
export const ARCamera: React.FC = () => {
  // null = permissions still being requested; true/false once resolved.
  const [hasPermission, setHasPermission] = useState<boolean | null>(null);
  const [type, setType] = useState(CameraType.front);
  const [isRecording, setIsRecording] = useState(false);
  const [faces, setFaces] = useState<DetectedFace[]>([]);
  const [selectedFilter, setSelectedFilter] = useState('none');
  const [flashMode, setFlashMode] = useState(false); // true = torch on
  const [zoom, setZoom] = useState(0); // 0-1; no UI adjusts it yet
  const [focusPoint, setFocusPoint] = useState({ x: 0.5, y: 0.5 }); // reserved for tap-to-focus; not wired up yet
  const cameraRef = useRef<Camera>(null);
  // 0 = hidden, 1 = fully white; driven by takePicture's shutter animation.
  const [shutterAnimation] = useState(new Animated.Value(0));

  useEffect(() => {
    (async () => {
      // Request media-library access alongside camera access: both
      // takePicture() and startRecording() save to the library, which
      // fails if only the camera permission was granted.
      const { status: cameraStatus } = await Camera.requestCameraPermissionsAsync();
      const { status: mediaStatus } = await MediaLibrary.requestPermissionsAsync();
      setHasPermission(cameraStatus === 'granted' && mediaStatus === 'granted');
    })();
  }, []);

  /** Captures a photo, runs it through the selected filter, and saves it. */
  const takePicture = async () => {
    if (cameraRef.current) {
      try {
        // White-flash shutter animation: fade in then out, 100 ms each.
        Animated.sequence([
          Animated.timing(shutterAnimation, {
            toValue: 1,
            duration: 100,
            useNativeDriver: true,
          }),
          Animated.timing(shutterAnimation, {
            toValue: 0,
            duration: 100,
            useNativeDriver: true,
          }),
        ]).start();
        // base64 output is intentionally not requested: it was never read
        // and roughly doubles the memory cost of every capture.
        const photo = await cameraRef.current.takePictureAsync({
          quality: 0.8,
          skipProcessing: false,
        });
        // Apply selected filter (currently a pass-through placeholder).
        const processedPhoto = await applyFilter(photo.uri, selectedFilter);
        // Save to media library
        await MediaLibrary.saveToLibraryAsync(processedPhoto || photo.uri);
        Alert.alert('Success!', 'Photo saved with filter applied!');
      } catch (error) {
        console.error('Error taking picture:', error);
        Alert.alert('Error', 'Failed to take picture');
      }
    }
  };

  /**
   * Placeholder filter stage. A real app would process the image here
   * (or call a backend service) and return the processed copy's URI;
   * this version just logs and returns the original URI.
   */
  const applyFilter = async (uri: string, filterType: string): Promise<string | null> => {
    console.log(`Applying ${filterType} filter to ${uri}`);
    return uri;
  };

  /** Records up to 60 s of 720p video, then saves it to the library. */
  const startRecording = async () => {
    if (cameraRef.current && !isRecording) {
      try {
        setIsRecording(true);
        // recordAsync resolves when stopRecording() is called or
        // maxDuration elapses.
        const video = await cameraRef.current.recordAsync({
          quality: Camera.Constants.VideoQuality['720p'],
          maxDuration: 60,
        });
        await MediaLibrary.saveToLibraryAsync(video.uri);
        Alert.alert('Success!', 'Video saved!');
      } catch (error) {
        console.error('Error recording video:', error);
      } finally {
        setIsRecording(false);
      }
    }
  };

  /** Stops an in-progress recording, resolving the recordAsync promise. */
  const stopRecording = () => {
    if (cameraRef.current && isRecording) {
      cameraRef.current.stopRecording();
      setIsRecording(false);
    }
  };

  const handleFacesDetected = ({ faces }: { faces: DetectedFace[] }) => {
    setFaces(faces);
  };

  const flipCamera = () => {
    setType(type === CameraType.back ? CameraType.front : CameraType.back);
  };

  /** One bounding box per face; green border + 😊 while smiling. */
  const renderFaceOverlays = () => {
    return faces.map((face, index) => {
      const { bounds, smilingProbability = 0 } = face;
      const isSmiling = smilingProbability > 0.7;
      return (
        <View
          key={index}
          style={[
            styles.faceOverlay,
            {
              left: bounds.origin.x,
              top: bounds.origin.y,
              width: bounds.size.width,
              height: bounds.size.height,
              borderColor: isSmiling ? '#4CAF50' : '#2196F3',
            },
          ]}
        >
          {isSmiling && (
            <Text style={styles.emojiOverlay}>😊</Text>
          )}
        </View>
      );
    });
  };

  /** Horizontal strip of filter presets; the active one is highlighted. */
  const renderFilterSelector = () => {
    return (
      <View style={styles.filterContainer}>
        {filters.map((filter) => (
          <TouchableOpacity
            key={filter.id}
            style={[
              styles.filterButton,
              selectedFilter === filter.id && styles.selectedFilter,
            ]}
            onPress={() => setSelectedFilter(filter.id)}
          >
            <Text style={styles.filterIcon}>{filter.icon}</Text>
            <Text style={styles.filterName}>{filter.name}</Text>
          </TouchableOpacity>
        ))}
      </View>
    );
  };

  if (hasPermission === null) {
    return <View style={styles.container}><Text>Requesting camera permission...</Text></View>;
  }
  if (hasPermission === false) {
    return (
      <View style={styles.container}>
        <Text style={styles.permissionText}>Camera permission denied</Text>
      </View>
    );
  }

  return (
    <View style={styles.container}>
      <Camera
        ref={cameraRef}
        style={styles.camera}
        type={type}
        flashMode={flashMode ? 'torch' : 'off'}
        zoom={zoom}
        onFacesDetected={handleFacesDetected}
        faceDetectorSettings={{
          mode: FaceDetector.FaceDetectorMode.fast,
          detectLandmarks: FaceDetector.FaceDetectorLandmarks.all,
          runClassifications: FaceDetector.FaceDetectorClassifications.all,
        }}
      >
        {/* Face Detection Overlays */}
        {renderFaceOverlays()}
        {/* Top Controls */}
        <View style={styles.topControls}>
          <TouchableOpacity
            style={styles.controlButton}
            onPress={() => setFlashMode(!flashMode)}
          >
            <Ionicons
              name={flashMode ? 'flash' : 'flash-off'}
              size={24}
              color="white"
            />
          </TouchableOpacity>
          <TouchableOpacity style={styles.controlButton} onPress={flipCamera}>
            <Ionicons name="camera-reverse" size={24} color="white" />
          </TouchableOpacity>
        </View>
        {/* Filter Selection */}
        {renderFilterSelector()}
        {/* Bottom Controls */}
        <View style={styles.bottomControls}>
          <TouchableOpacity
            style={styles.captureButton}
            onPress={isRecording ? stopRecording : takePicture}
          >
            <View
              style={[
                styles.captureButtonInner,
                isRecording && styles.recordingButton,
              ]}
            />
          </TouchableOpacity>
          <TouchableOpacity
            style={styles.recordButton}
            onPress={startRecording}
            disabled={isRecording}
          >
            <Ionicons
              name="videocam"
              size={28}
              color={isRecording ? '#FF3B30' : 'white'}
            />
          </TouchableOpacity>
        </View>
        {/* Shutter flash overlay. pointerEvents="none" is required: this
            view fills the screen above the controls, and without it every
            touch on the buttons below would be swallowed even while the
            overlay is fully transparent. */}
        <Animated.View
          pointerEvents="none"
          style={[
            styles.shutterOverlay,
            {
              opacity: shutterAnimation,
            },
          ]}
        />
      </Camera>
      {/* Face Detection Info */}
      {faces.length > 0 && (
        <View style={styles.faceInfo}>
          <Text style={styles.faceInfoText}>
            {faces.length} face{faces.length > 1 ? 's' : ''} detected
          </Text>
        </View>
      )}
    </View>
  );
};
// Window dimensions. NOTE(review): width and height are not referenced by
// any style below — confirm nothing else in this file needs them before
// removing.
const { width, height } = Dimensions.get('window');
// Styles for the ARCamera screen and its overlay layers.
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
// Camera preview fills the whole screen.
camera: {
flex: 1,
},
// Flash and flip buttons pinned near the top edge.
topControls: {
position: 'absolute',
top: 50,
left: 20,
right: 20,
flexDirection: 'row',
justifyContent: 'space-between',
},
controlButton: {
backgroundColor: 'rgba(0, 0, 0, 0.5)',
padding: 12,
borderRadius: 25,
},
// Horizontal filter strip floated above the bottom controls.
filterContainer: {
position: 'absolute',
bottom: 120,
left: 0,
right: 0,
flexDirection: 'row',
paddingHorizontal: 10,
},
filterButton: {
alignItems: 'center',
marginHorizontal: 8,
padding: 10,
backgroundColor: 'rgba(0, 0, 0, 0.5)',
borderRadius: 12,
minWidth: 60,
},
// Gold border marks the currently selected filter.
selectedFilter: {
backgroundColor: 'rgba(255, 255, 255, 0.3)',
borderWidth: 2,
borderColor: '#FFD700',
},
filterIcon: {
fontSize: 20,
marginBottom: 4,
},
filterName: {
color: 'white',
fontSize: 10,
fontWeight: '600',
},
// Shutter and record buttons centered along the bottom.
bottomControls: {
position: 'absolute',
bottom: 30,
left: 0,
right: 0,
flexDirection: 'row',
justifyContent: 'center',
alignItems: 'center',
},
captureButton: {
width: 80,
height: 80,
borderRadius: 40,
backgroundColor: 'white',
justifyContent: 'center',
alignItems: 'center',
marginHorizontal: 30,
},
captureButtonInner: {
width: 70,
height: 70,
borderRadius: 35,
backgroundColor: 'white',
borderWidth: 3,
borderColor: 'black',
},
// Inner capture button becomes a red rounded square while recording.
recordingButton: {
backgroundColor: '#FF3B30',
borderRadius: 8,
},
recordButton: {
position: 'absolute',
right: 40,
padding: 15,
backgroundColor: 'rgba(0, 0, 0, 0.5)',
borderRadius: 25,
},
// Per-face bounding box; borderColor is supplied dynamically.
faceOverlay: {
position: 'absolute',
borderWidth: 2,
borderRadius: 8,
justifyContent: 'center',
alignItems: 'center',
},
emojiOverlay: {
fontSize: 30,
backgroundColor: 'transparent',
},
// Full-screen white layer whose opacity animates the shutter flash.
shutterOverlay: {
...StyleSheet.absoluteFillObject,
backgroundColor: 'white',
},
// Small badge reporting how many faces are currently detected.
faceInfo: {
position: 'absolute',
top: 100,
left: 20,
backgroundColor: 'rgba(0, 0, 0, 0.7)',
padding: 10,
borderRadius: 8,
},
faceInfoText: {
color: 'white',
fontSize: 14,
},
permissionText: {
color: 'white',
fontSize: 18,
textAlign: 'center',
marginTop: 100,
},
});
export default ARCamera;
// services/filterAI.ts
import OpenAI from 'openai';
// Normalized photo-filter parameters; every value is expected to be in
// the 0-1 range (see the prompt in FilterAI.generateFilter).
interface FilterParameters {
brightness: number;
contrast: number;
saturation: number;
hue: number;
temperature: number;
vignette: number;
}
class FilterAI {
private openai: OpenAI;
constructor(apiKey: string) {
this.openai = new OpenAI({ apiKey });
}
async generateFilter(
imageDescription: string,
mood: string,
style: string
): Promise<FilterParameters> {
const prompt = `
Create a photo filter for an image with:
- Content: ${imageDescription}
- Desired mood: ${mood}
- Style: ${style}
Return JSON with filter parameters (0-1 range):
{
"brightness": 0.1,
"contrast": 0.2,
"saturation": 0.3,
"hue": 0.0,
"temperature": 0.1,
"vignette": 0.2
}
`;
try {
const response = await this.openai.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: prompt }],
temperature: 0.7,
});
return JSON.parse(response.choices[0].message.content || '{}');
} catch (error) {
console.error('Filter generation error:', error);
return {
brightness: 0,
contrast: 0,
saturation: 0,
hue: 0,
temperature: 0,
vignette: 0,
};
}
}
async analyzeImageContent(imageUri: string): Promise<string> {
// In a real app, you'd use computer vision APIs
// to analyze the image content
return 'landscape with mountains and sky';
}
async suggestFilters(
imageContent: string,
timeOfDay: string,
weather: string
): Promise<string[]> {
const prompt = `
Suggest 5 photo filter names for:
- Image content: ${imageContent}
- Time: ${timeOfDay}
- Weather: ${weather}
Return creative filter names that would enhance this photo.
`;
try {
const response = await this.openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: prompt }],
temperature: 0.8,
});
const content = response.choices[0].message.content || '';
return content.split('\n').filter(line => line.trim());
} catch (error) {
console.error('Filter suggestion error:', error);
return ['Vintage', 'Warm', 'Cool', 'Dramatic', 'Soft'];
}
}
}
export default FilterAI;
// components/FaceTracker.tsx
import React, { useState, useEffect } from 'react';
import { View, StyleSheet, Text } from 'react-native';
import * as FaceDetector from 'expo-face-detector';
import Svg, { Circle, Path, G, Text as SvgText } from 'react-native-svg';
// Face geometry and classification data passed in by the camera's face
// detector (mirrors expo-face-detector's result shape).
interface FaceData {
bounds: {
origin: { x: number; y: number }; // top-left corner, camera-view coordinates
size: { width: number; height: number };
};
// 0-1 probabilities; only present when classifications are enabled.
leftEyeOpenProbability?: number;
rightEyeOpenProbability?: number;
smilingProbability?: number;
faceAngle?: number; // NOTE(review): not currently used by FaceTracker
}
// Props for FaceTracker: the detected faces plus the camera view's pixel
// size, which becomes the SVG canvas dimensions.
interface FaceTrackerProps {
faces: FaceData[];
cameraWidth: number;
cameraHeight: number;
}
/**
 * Transparent SVG layer that draws playful effects over each detected
 * face: a dashed outline (green while smiling), per-eye indicators
 * (orange when an eye is closed), a smile arc, and emoji badges for
 * winking / both eyes closed.
 */
export const FaceTracker: React.FC<FaceTrackerProps> = ({
  faces,
  cameraWidth,
  cameraHeight,
}) => {
  const renderFaceEffects = (face: FaceData, index: number) => {
    const { bounds, smilingProbability = 0, leftEyeOpenProbability = 1, rightEyeOpenProbability = 1 } = face;
    const isSmiling = smilingProbability > 0.7;
    const leftEyeClosed = leftEyeOpenProbability < 0.5;
    const rightEyeClosed = rightEyeOpenProbability < 0.5;
    // Winking = exactly one eye closed.
    const isWinking = (leftEyeClosed && !rightEyeClosed) || (!leftEyeClosed && rightEyeClosed);
    const eyesClosed = leftEyeClosed && rightEyeClosed;
    const faceX = bounds.origin.x;
    const faceY = bounds.origin.y;
    const faceWidth = bounds.size.width;
    const faceHeight = bounds.size.height;
    return (
      <G key={index}>
        {/* Face outline */}
        <Circle
          cx={faceX + faceWidth / 2}
          cy={faceY + faceHeight / 2}
          r={Math.max(faceWidth, faceHeight) / 2}
          fill="none"
          stroke={isSmiling ? '#4CAF50' : '#2196F3'}
          strokeWidth="3"
          strokeDasharray="10,5"
        />
        {/* Eyes */}
        <Circle
          cx={faceX + faceWidth * 0.35}
          cy={faceY + faceHeight * 0.4}
          r="8"
          fill={leftEyeClosed ? '#FF5722' : '#4CAF50'}
        />
        <Circle
          cx={faceX + faceWidth * 0.65}
          cy={faceY + faceHeight * 0.4}
          r="8"
          fill={rightEyeClosed ? '#FF5722' : '#4CAF50'}
        />
        {/* Smile indicator */}
        {isSmiling && (
          <Path
            d={`M ${faceX + faceWidth * 0.3} ${faceY + faceHeight * 0.7} Q ${faceX + faceWidth * 0.5} ${faceY + faceHeight * 0.85} ${faceX + faceWidth * 0.7} ${faceY + faceHeight * 0.7}`}
            fill="none"
            stroke="#4CAF50"
            strokeWidth="4"
            strokeLinecap="round"
          />
        )}
        {/* Special effects. These must be react-native-svg's <Text>
            (imported as SvgText): react-native's Text is not a valid
            child of <Svg> and does not support x/y/textAnchor/fill. */}
        {isWinking && (
          <SvgText
            x={faceX + faceWidth / 2}
            y={faceY - 20}
            fontSize="24"
            textAnchor="middle"
            fill="#FFD700"
          >
            😉
          </SvgText>
        )}
        {eyesClosed && (
          <SvgText
            x={faceX + faceWidth / 2}
            y={faceY - 20}
            fontSize="24"
            textAnchor="middle"
            fill="#FFD700"
          >
            😴
          </SvgText>
        )}
      </G>
    );
  };
  return (
    <Svg
      style={StyleSheet.absoluteFill}
      width={cameraWidth}
      height={cameraHeight}
    >
      {faces.map((face, index) => renderFaceEffects(face, index))}
    </Svg>
  );
};
export default FaceTracker;
// services/beautyAI.ts
// Tunable beauty-filter parameters. Numeric fields are intensities where
// 0 disables the effect (see generateBeautyPresets for typical values).
interface BeautySettings {
skinSmoothing: number;
brightenEyes: number;
whiteTeeth: number;
slimFace: number;
enlargeEyes: number;
lipstick: { enabled: boolean; color: string; intensity: number };
blush: { enabled: boolean; color: string; intensity: number };
}
/**
 * Placeholder beauty-filter service. A production version would delegate
 * to on-device vision libraries or cloud services (e.g. Google Vision AI,
 * AWS Rekognition, or a dedicated beauty-filter API).
 */
class BeautyAI {
  /**
   * Pretends to apply the given beauty settings to an image.
   * Currently a stub: logs the settings and resolves with the
   * unmodified input URI.
   */
  async applyBeautyFilter(
    imageUri: string,
    settings: BeautySettings
  ): Promise<string> {
    console.log('Applying beauty filter with settings:', settings);
    return imageUri;
  }

  /**
   * Returns hard-coded facial landmark positions — a stand-in for a
   * real landmark detector.
   */
  async detectFacialFeatures(imageUri: string) {
    const landmarks = {
      leftEye: { x: 100, y: 150 },
      rightEye: { x: 200, y: 150 },
      nose: { x: 150, y: 200 },
      mouth: { x: 150, y: 250 },
      jawline: [/* array of points */],
      eyebrows: [/* array of points */],
    };
    return landmarks;
  }

  /**
   * Two built-in presets: a subtle "natural" look and a stronger
   * "glam" look with lipstick and blush enabled.
   */
  generateBeautyPresets(): BeautySettings[] {
    const natural: BeautySettings = {
      skinSmoothing: 0.3,
      brightenEyes: 0.2,
      whiteTeeth: 0.1,
      slimFace: 0.0,
      enlargeEyes: 0.0,
      lipstick: { enabled: false, color: '#FF6B6B', intensity: 0 },
      blush: { enabled: false, color: '#FFB6C1', intensity: 0 },
    };
    const glam: BeautySettings = {
      skinSmoothing: 0.5,
      brightenEyes: 0.4,
      whiteTeeth: 0.3,
      slimFace: 0.2,
      enlargeEyes: 0.1,
      lipstick: { enabled: true, color: '#DC143C', intensity: 0.3 },
      blush: { enabled: true, color: '#FFC0CB', intensity: 0.2 },
    };
    return [natural, glam];
  }
}
export default BeautyAI;
// hooks/useFilterPerformance.ts
import { useState, useCallback, useMemo } from 'react';
// Runtime metrics reported by useFilterPerformance.
interface PerformanceMetrics {
fps: number; // derived from the most recent measured processing time
processingTime: number; // milliseconds for the last measured operation
memoryUsage: number; // placeholder; never updated by the hook yet
}
/**
 * Tracks filter-pipeline performance and derives a quality configuration.
 *
 * - optimizeForDevice(): flips to low-power mode for low-RAM devices.
 * - measurePerformance(): records elapsed time since `startTime` plus an
 *   approximate fps for the last processed frame.
 * - filterConfig: quality/fps/effects settings derived from the mode.
 */
export const useFilterPerformance = () => {
  const [metrics, setMetrics] = useState<PerformanceMetrics>({
    fps: 30,
    processingTime: 0,
    memoryUsage: 0,
  });
  const [isLowPowerMode, setIsLowPowerMode] = useState(false);

  // NOTE(review): deviceSpecs is untyped; this assumes a `ram` field
  // measured in MB — confirm against the caller.
  const optimizeForDevice = useCallback((deviceSpecs: any) => {
    if (deviceSpecs.ram < 4000) {
      setIsLowPowerMode(true);
    }
  }, []);

  // Filter settings derived from the current power mode.
  const filterConfig = useMemo(() => {
    return {
      quality: isLowPowerMode ? 'medium' : 'high',
      fps: isLowPowerMode ? 24 : 30,
      enableHeavyEffects: !isLowPowerMode,
      cacheSize: isLowPowerMode ? 50 : 100,
    };
  }, [isLowPowerMode]);

  const measurePerformance = useCallback((startTime: number) => {
    const endTime = Date.now();
    const processingTime = endTime - startTime;
    setMetrics(prev => ({
      ...prev,
      processingTime,
      // Clamp the denominator to 1 ms: work finishing within the same
      // millisecond would otherwise divide by zero and report Infinity.
      fps: Math.round(1000 / Math.max(processingTime, 1)),
    }));
  }, []);

  return {
    metrics,
    filterConfig,
    optimizeForDevice,
    measurePerformance,
    isLowPowerMode,
  };
};
In this lesson, you learned how to request camera and media-library permissions, capture and save photos and video with expo-camera, run real-time face detection with overlay effects, and structure AI-assisted filter, beauty, and performance services.
Code with AI: Try building these advanced camera features.
Prompts to try: "Wire a zoom slider to the ARCamera zoom state," "Implement the applyFilter placeholder with a real image-processing library," or "Render the FaceTracker overlay on top of the camera preview."
AR and camera effects are the future of mobile interaction - master these technologies to build the next viral social app!